From 3c4e77d171c31d19cb72382cfb2429362ea52172 Mon Sep 17 00:00:00 2001 From: Fernando Briano Date: Wed, 7 May 2025 16:23:38 +0100 Subject: [PATCH 1/3] [API] Updates source code docs --- .../lib/elasticsearch/api/actions/cat/nodes.rb | 9 +++++---- .../api/actions/inference/chat_completion_unified.rb | 8 ++++++++ .../lib/elasticsearch/api/actions/inference/put.rb | 5 ----- .../api/actions/inference/put_alibabacloud.rb | 5 ----- .../elasticsearch/api/actions/inference/put_anthropic.rb | 5 ----- .../api/actions/inference/put_azureaistudio.rb | 5 ----- .../api/actions/inference/put_azureopenai.rb | 5 ----- .../elasticsearch/api/actions/inference/put_cohere.rb | 5 ----- .../api/actions/inference/put_googleaistudio.rb | 5 ----- .../api/actions/inference/put_googlevertexai.rb | 5 ----- .../api/actions/inference/put_hugging_face.rb | 5 ----- .../elasticsearch/api/actions/inference/put_jinaai.rb | 5 ----- .../elasticsearch/api/actions/inference/put_mistral.rb | 5 ----- .../elasticsearch/api/actions/inference/put_openai.rb | 5 ----- .../elasticsearch/api/actions/inference/put_watsonx.rb | 5 ----- elasticsearch-api/lib/elasticsearch/api/version.rb | 2 +- 16 files changed, 14 insertions(+), 70 deletions(-) diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cat/nodes.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cat/nodes.rb index e5bb34ad56..c76b004be1 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/cat/nodes.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/cat/nodes.rb @@ -29,12 +29,13 @@ module Actions # @option arguments [String] :bytes The unit used to display byte values. # @option arguments [Boolean, String] :full_id If +true+, return the full node ID. If +false+, return the shortened node ID. Server default: false. # @option arguments [Boolean] :include_unloaded_segments If true, the response includes information from segments that are not loaded into memory. - # @option arguments [String, Array] :h List of columns to appear in the response. Supports simple wildcards. - # @option arguments [String, Array] :s List of columns that determine how the table should be sorted. + # @option arguments [String, Array] :h A comma-separated list of columns names to display. + # It supports simple wildcards. Server default: ip,hp,rp,r,m,n,cpu,l. + # @option arguments [String, Array] :s A comma-separated list of column names or aliases that determines the sort order. # Sorting defaults to ascending and can be changed by setting +:asc+ # or +:desc+ as a suffix to the column name. - # @option arguments [Time] :master_timeout Period to wait for a connection to the master node. Server default: 30s. - # @option arguments [String] :time Unit used to display time values. + # @option arguments [Time] :master_timeout The period to wait for a connection to the master node. Server default: 30s. + # @option arguments [String] :time The unit used to display time values. # @option arguments [String] :format Specifies the format to return the columnar data in, can be set to # +text+, +json+, +cbor+, +yaml+, or +smile+. Server default: text. # @option arguments [Boolean] :help When set to +true+ will output available columns. 
This option diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/inference/chat_completion_unified.rb b/elasticsearch-api/lib/elasticsearch/api/actions/inference/chat_completion_unified.rb index 03c56cd213..a300350393 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/inference/chat_completion_unified.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/inference/chat_completion_unified.rb @@ -23,6 +23,14 @@ module API module Inference module Actions # Perform chat completion inference + # The chat completion inference API enables real-time responses for chat completion tasks by delivering answers incrementally, reducing response times during computation. + # It only works with the +chat_completion+ task type for +openai+ and +elastic+ inference services. + # IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. + # For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. + # NOTE: The +chat_completion+ task type is only available within the _stream API and only supports streaming. + # The Chat completion inference API and the Stream inference API differ in their response structure and capabilities. + # The Chat completion inference API provides more comprehensive customization options through more fields and function calling support. + # If you use the +openai+ service or the +elastic+ service, use the Chat completion inference API. # # @option arguments [String] :inference_id The inference Id (*Required*) # @option arguments [Time] :timeout Specifies the amount of time to wait for the inference request to complete. Server default: 30s. diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/inference/put.rb b/elasticsearch-api/lib/elasticsearch/api/actions/inference/put.rb index 979e40cd03..9c57aa0b9a 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/inference/put.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/inference/put.rb @@ -23,11 +23,6 @@ module API module Inference module Actions # Create an inference endpoint. - # When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - # After creating the endpoint, wait for the model deployment to complete before using it. - # To verify the deployment status, use the get trained model statistics API. - # Look for +"state": "fully_allocated"+ in the response and ensure that the +"allocation_count"+ matches the +"target_allocation_count"+. - # Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. # IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. # For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. 
# However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_alibabacloud.rb b/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_alibabacloud.rb index 6622c6a839..545a7fea3e 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_alibabacloud.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_alibabacloud.rb @@ -24,11 +24,6 @@ module Inference module Actions # Create an AlibabaCloud AI Search inference endpoint. # Create an inference endpoint to perform an inference task with the +alibabacloud-ai-search+ service. - # When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - # After creating the endpoint, wait for the model deployment to complete before using it. - # To verify the deployment status, use the get trained model statistics API. - # Look for +"state": "fully_allocated"+ in the response and ensure that the +"allocation_count"+ matches the +"target_allocation_count"+. - # Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. # # @option arguments [String] :task_type The type of the inference task that the model will perform. (*Required*) # @option arguments [String] :alibabacloud_inference_id The unique identifier of the inference endpoint. (*Required*) diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_anthropic.rb b/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_anthropic.rb index 57b4078bb9..9ecc1a2259 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_anthropic.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_anthropic.rb @@ -24,11 +24,6 @@ module Inference module Actions # Create an Anthropic inference endpoint. # Create an inference endpoint to perform an inference task with the +anthropic+ service. - # When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - # After creating the endpoint, wait for the model deployment to complete before using it. - # To verify the deployment status, use the get trained model statistics API. - # Look for +"state": "fully_allocated"+ in the response and ensure that the +"allocation_count"+ matches the +"target_allocation_count"+. - # Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. # # @option arguments [String] :task_type The task type. # The only valid task type for the model to perform is +completion+. (*Required*) diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_azureaistudio.rb b/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_azureaistudio.rb index e28ea8a7f7..bde6b1e92c 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_azureaistudio.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_azureaistudio.rb @@ -24,11 +24,6 @@ module Inference module Actions # Create an Azure AI studio inference endpoint. # Create an inference endpoint to perform an inference task with the +azureaistudio+ service. - # When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. 
- # After creating the endpoint, wait for the model deployment to complete before using it. - # To verify the deployment status, use the get trained model statistics API. - # Look for +"state": "fully_allocated"+ in the response and ensure that the +"allocation_count"+ matches the +"target_allocation_count"+. - # Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. # # @option arguments [String] :task_type The type of the inference task that the model will perform. (*Required*) # @option arguments [String] :azureaistudio_inference_id The unique identifier of the inference endpoint. (*Required*) diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_azureopenai.rb b/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_azureopenai.rb index d2043dcb1e..fb6bd70ab0 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_azureopenai.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_azureopenai.rb @@ -28,11 +28,6 @@ module Actions # * {https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-4-and-gpt-4-turbo-models GPT-4 and GPT-4 Turbo models} # * {https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-35 GPT-3.5} # The list of embeddings models that you can choose from in your deployment can be found in the {https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#embeddings Azure models documentation}. - # When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - # After creating the endpoint, wait for the model deployment to complete before using it. - # To verify the deployment status, use the get trained model statistics API. - # Look for +"state": "fully_allocated"+ in the response and ensure that the +"allocation_count"+ matches the +"target_allocation_count"+. - # Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. # # @option arguments [String] :task_type The type of the inference task that the model will perform. # NOTE: The +chat_completion+ task type only supports streaming and only through the _stream API. (*Required*) diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_cohere.rb b/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_cohere.rb index b9fff487e6..aa838b5c1a 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_cohere.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_cohere.rb @@ -24,11 +24,6 @@ module Inference module Actions # Create a Cohere inference endpoint. # Create an inference endpoint to perform an inference task with the +cohere+ service. - # When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - # After creating the endpoint, wait for the model deployment to complete before using it. - # To verify the deployment status, use the get trained model statistics API. - # Look for +"state": "fully_allocated"+ in the response and ensure that the +"allocation_count"+ matches the +"target_allocation_count"+. - # Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. 
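As an illustrative aside on the `put_cohere` action touched above: a minimal sketch, assuming a placeholder endpoint ID, API key, and model ID, and a request body that follows the general Elasticsearch Cohere inference service conventions rather than anything stated in this diff.

```ruby
require 'elasticsearch'

# Placeholder connection; adjust URL/credentials for a real cluster.
client = Elasticsearch::Client.new(url: 'http://localhost:9200')

# Create a Cohere text-embedding inference endpoint via the put_cohere action
# documented above. The endpoint ID, API key, and model ID are placeholders.
client.inference.put_cohere(
  task_type: 'text_embedding',
  cohere_inference_id: 'my-cohere-embeddings',
  body: {
    service: 'cohere',
    service_settings: {
      api_key: 'COHERE_API_KEY',
      model_id: 'embed-english-v3.0'
    }
  }
)
```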
# # @option arguments [String] :task_type The type of the inference task that the model will perform. (*Required*) # @option arguments [String] :cohere_inference_id The unique identifier of the inference endpoint. (*Required*) diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_googleaistudio.rb b/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_googleaistudio.rb index 31f9d0f3ad..a0514a4e9d 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_googleaistudio.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_googleaistudio.rb @@ -24,11 +24,6 @@ module Inference module Actions # Create an Google AI Studio inference endpoint. # Create an inference endpoint to perform an inference task with the +googleaistudio+ service. - # When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - # After creating the endpoint, wait for the model deployment to complete before using it. - # To verify the deployment status, use the get trained model statistics API. - # Look for +"state": "fully_allocated"+ in the response and ensure that the +"allocation_count"+ matches the +"target_allocation_count"+. - # Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. # # @option arguments [String] :task_type The type of the inference task that the model will perform. (*Required*) # @option arguments [String] :googleaistudio_inference_id The unique identifier of the inference endpoint. (*Required*) diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_googlevertexai.rb b/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_googlevertexai.rb index c3d8f45bf7..7cc7d809c7 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_googlevertexai.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_googlevertexai.rb @@ -24,11 +24,6 @@ module Inference module Actions # Create a Google Vertex AI inference endpoint. # Create an inference endpoint to perform an inference task with the +googlevertexai+ service. - # When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - # After creating the endpoint, wait for the model deployment to complete before using it. - # To verify the deployment status, use the get trained model statistics API. - # Look for +"state": "fully_allocated"+ in the response and ensure that the +"allocation_count"+ matches the +"target_allocation_count"+. - # Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. # # @option arguments [String] :task_type The type of the inference task that the model will perform. (*Required*) # @option arguments [String] :googlevertexai_inference_id The unique identifier of the inference endpoint. 
(*Required*) diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_hugging_face.rb b/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_hugging_face.rb index 75d0f80753..fab5f7b154 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_hugging_face.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_hugging_face.rb @@ -35,11 +35,6 @@ module Actions # * +e5-small-v2+ # * +multilingual-e5-base+ # * +multilingual-e5-small+ - # When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - # After creating the endpoint, wait for the model deployment to complete before using it. - # To verify the deployment status, use the get trained model statistics API. - # Look for +"state": "fully_allocated"+ in the response and ensure that the +"allocation_count"+ matches the +"target_allocation_count"+. - # Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. # # @option arguments [String] :task_type The type of the inference task that the model will perform. (*Required*) # @option arguments [String] :huggingface_inference_id The unique identifier of the inference endpoint. (*Required*) diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_jinaai.rb b/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_jinaai.rb index a8f63c0e99..ace5a1d695 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_jinaai.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_jinaai.rb @@ -26,11 +26,6 @@ module Actions # Create an inference endpoint to perform an inference task with the +jinaai+ service. # To review the available +rerank+ models, refer to . # To review the available +text_embedding+ models, refer to the . - # When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - # After creating the endpoint, wait for the model deployment to complete before using it. - # To verify the deployment status, use the get trained model statistics API. - # Look for +"state": "fully_allocated"+ in the response and ensure that the +"allocation_count"+ matches the +"target_allocation_count"+. - # Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. # # @option arguments [String] :task_type The type of the inference task that the model will perform. (*Required*) # @option arguments [String] :jinaai_inference_id The unique identifier of the inference endpoint. (*Required*) diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_mistral.rb b/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_mistral.rb index dbdb9d1f06..3fd32dc48a 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_mistral.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_mistral.rb @@ -24,11 +24,6 @@ module Inference module Actions # Create a Mistral inference endpoint. # Creates an inference endpoint to perform an inference task with the +mistral+ service. - # When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - # After creating the endpoint, wait for the model deployment to complete before using it. - # To verify the deployment status, use the get trained model statistics API. 
- # Look for +"state": "fully_allocated"+ in the response and ensure that the +"allocation_count"+ matches the +"target_allocation_count"+. - # Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. # # @option arguments [String] :task_type The task type. # The only valid task type for the model to perform is +text_embedding+. (*Required*) diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_openai.rb b/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_openai.rb index c5a338f91b..e7bfb746f9 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_openai.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_openai.rb @@ -24,11 +24,6 @@ module Inference module Actions # Create an OpenAI inference endpoint. # Create an inference endpoint to perform an inference task with the +openai+ service or +openai+ compatible APIs. - # When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - # After creating the endpoint, wait for the model deployment to complete before using it. - # To verify the deployment status, use the get trained model statistics API. - # Look for +"state": "fully_allocated"+ in the response and ensure that the +"allocation_count"+ matches the +"target_allocation_count"+. - # Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. # # @option arguments [String] :task_type The type of the inference task that the model will perform. # NOTE: The +chat_completion+ task type only supports streaming and only through the _stream API. (*Required*) diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_watsonx.rb b/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_watsonx.rb index 1e8c20ab24..c57f7f6099 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_watsonx.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_watsonx.rb @@ -26,11 +26,6 @@ module Actions # Create an inference endpoint to perform an inference task with the +watsonxai+ service. # You need an IBM Cloud Databases for Elasticsearch deployment to use the +watsonxai+ inference service. # You can provision one through the IBM catalog, the Cloud Databases CLI plug-in, the Cloud Databases API, or Terraform. - # When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - # After creating the endpoint, wait for the model deployment to complete before using it. - # To verify the deployment status, use the get trained model statistics API. - # Look for +"state": "fully_allocated"+ in the response and ensure that the +"allocation_count"+ matches the +"target_allocation_count"+. - # Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. # # @option arguments [String] :task_type The task type. # The only valid task type for the model to perform is +text_embedding+. 
(*Required*) diff --git a/elasticsearch-api/lib/elasticsearch/api/version.rb b/elasticsearch-api/lib/elasticsearch/api/version.rb index 186c290dc7..59234fedfe 100644 --- a/elasticsearch-api/lib/elasticsearch/api/version.rb +++ b/elasticsearch-api/lib/elasticsearch/api/version.rb @@ -18,6 +18,6 @@ module Elasticsearch module API VERSION = '9.0.0'.freeze - ES_SPECIFICATION_COMMIT = 'c7ee66d99d0c1d6906bdca079a868c187de23c69'.freeze + ES_SPECIFICATION_COMMIT = '7a7e2a257032dcadda0f1aeec9344598a2e12913'.freeze end end From 0e2968b3acf1e402f1f71465d6507da565122137 Mon Sep 17 00:00:00 2001 From: Fernando Briano Date: Wed, 7 May 2025 16:32:19 +0100 Subject: [PATCH 2/3] [API] Updates code display in source code documentation Use `rake doc` to generate reference docs. --- .../api/actions/async_search/delete.rb | 2 +- .../api/actions/async_search/get.rb | 2 +- .../api/actions/async_search/status.rb | 2 +- .../api/actions/async_search/submit.rb | 8 +- .../lib/elasticsearch/api/actions/bulk.rb | 134 ++++++++++-------- .../elasticsearch/api/actions/cat/aliases.rb | 16 +-- .../api/actions/cat/allocation.rb | 14 +- .../api/actions/cat/component_templates.rb | 14 +- .../elasticsearch/api/actions/cat/count.rb | 14 +- .../api/actions/cat/fielddata.rb | 10 +- .../elasticsearch/api/actions/cat/health.rb | 16 +-- .../elasticsearch/api/actions/cat/indices.rb | 12 +- .../elasticsearch/api/actions/cat/master.rb | 14 +- .../actions/cat/ml_data_frame_analytics.rb | 8 +- .../api/actions/cat/ml_datafeeds.rb | 14 +- .../elasticsearch/api/actions/cat/ml_jobs.rb | 16 +-- .../api/actions/cat/ml_trained_models.rb | 12 +- .../api/actions/cat/nodeattrs.rb | 14 +- .../elasticsearch/api/actions/cat/nodes.rb | 12 +- .../api/actions/cat/pending_tasks.rb | 14 +- .../elasticsearch/api/actions/cat/plugins.rb | 14 +- .../elasticsearch/api/actions/cat/recovery.rb | 16 +-- .../api/actions/cat/repositories.rb | 14 +- .../elasticsearch/api/actions/cat/segments.rb | 18 +-- .../elasticsearch/api/actions/cat/shards.rb | 14 +- .../api/actions/cat/snapshots.rb | 14 +- .../elasticsearch/api/actions/cat/tasks.rb | 14 +- .../api/actions/cat/templates.rb | 14 +- .../api/actions/cat/thread_pool.rb | 14 +- .../api/actions/cat/transforms.rb | 12 +- .../elasticsearch/api/actions/clear_scroll.rb | 2 +- .../api/actions/close_point_in_time.rb | 4 +- .../actions/cluster/get_component_template.rb | 8 +- .../api/actions/cluster/get_settings.rb | 4 +- .../api/actions/cluster/health.rb | 2 +- .../api/actions/cluster/pending_tasks.rb | 4 +- .../cluster/post_voting_config_exclusions.rb | 8 +- .../actions/cluster/put_component_template.rb | 14 +- .../api/actions/cluster/put_settings.rb | 10 +- .../api/actions/cluster/reroute.rb | 8 +- .../api/actions/cluster/state.rb | 6 +- .../api/actions/cluster/stats.rb | 2 +- .../api/actions/connector/check_in.rb | 2 +- .../api/actions/connector/sync_job_cancel.rb | 2 +- .../actions/connector/sync_job_check_in.rb | 2 +- .../api/actions/connector/sync_job_claim.rb | 4 +- .../api/actions/connector/sync_job_error.rb | 2 +- .../connector/sync_job_update_stats.rb | 4 +- .../actions/connector/update_api_key_id.rb | 2 +- .../actions/connector/update_index_name.rb | 2 +- .../lib/elasticsearch/api/actions/count.rb | 34 ++--- .../lib/elasticsearch/api/actions/create.rb | 76 +++++----- .../delete_auto_follow_pattern.rb | 2 +- .../cross_cluster_replication/follow_info.rb | 2 +- .../get_auto_follow_pattern.rb | 2 +- .../pause_auto_follow_pattern.rb | 2 +- .../cross_cluster_replication/pause_follow.rb | 2 +- 
.../resume_auto_follow_pattern.rb | 2 +- .../cross_cluster_replication/stats.rb | 2 +- .../cross_cluster_replication/unfollow.rb | 2 +- .../dangling_indices/delete_dangling_index.rb | 2 +- .../dangling_indices/import_dangling_index.rb | 2 +- .../dangling_indices/list_dangling_indices.rb | 2 +- .../lib/elasticsearch/api/actions/delete.rb | 24 ++-- .../api/actions/delete_by_query.rb | 98 +++++++------ .../api/actions/delete_by_query_rethrottle.rb | 2 +- .../api/actions/delete_script.rb | 4 +- .../api/actions/enrich/execute_policy.rb | 2 +- .../elasticsearch/api/actions/eql/delete.rb | 2 +- .../api/actions/esql/async_query.rb | 14 +- .../api/actions/esql/async_query_delete.rb | 4 +- .../api/actions/esql/async_query_get.rb | 8 +- .../api/actions/esql/async_query_stop.rb | 6 +- .../elasticsearch/api/actions/esql/query.rb | 8 +- .../lib/elasticsearch/api/actions/exists.rb | 36 ++--- .../api/actions/exists_source.rb | 16 ++- .../lib/elasticsearch/api/actions/explain.rb | 26 ++-- .../api/actions/features/get_features.rb | 2 +- .../elasticsearch/api/actions/field_caps.rb | 12 +- .../api/actions/fleet/global_checkpoints.rb | 8 +- .../api/actions/fleet/msearch.rb | 4 +- .../elasticsearch/api/actions/fleet/search.rb | 2 +- .../lib/elasticsearch/api/actions/get.rb | 62 ++++---- .../elasticsearch/api/actions/get_script.rb | 2 +- .../elasticsearch/api/actions/get_source.rb | 21 +-- .../api/actions/graph/explore.rb | 2 +- .../lib/elasticsearch/api/actions/index.rb | 84 +++++------ .../explain_lifecycle.rb | 4 +- .../migrate_to_data_tiers.rb | 4 +- .../move_to_step.rb | 2 +- .../api/actions/indices/add_block.rb | 16 +-- .../api/actions/indices/analyze.rb | 6 +- .../api/actions/indices/clear_cache.rb | 26 ++-- .../api/actions/indices/clone.rb | 10 +- .../api/actions/indices/close.rb | 16 +-- .../api/actions/indices/create.rb | 24 ++-- .../api/actions/indices/create_data_stream.rb | 6 +- .../api/actions/indices/data_streams_stats.rb | 6 +- .../api/actions/indices/delete.rb | 12 +- .../api/actions/indices/delete_alias.rb | 4 +- .../actions/indices/delete_data_lifecycle.rb | 2 +- .../api/actions/indices/delete_data_stream.rb | 4 +- .../api/actions/indices/delete_template.rb | 2 +- .../api/actions/indices/disk_usage.rb | 18 +-- .../api/actions/indices/downsample.rb | 4 +- .../api/actions/indices/exists.rb | 16 +-- .../api/actions/indices/exists_alias.rb | 14 +- .../api/actions/indices/exists_template.rb | 4 +- .../api/actions/indices/field_usage_stats.rb | 8 +- .../api/actions/indices/flush.rb | 18 +-- .../api/actions/indices/forcemerge.rb | 21 +-- .../api/actions/indices/get_alias.rb | 16 +-- .../api/actions/indices/get_data_lifecycle.rb | 10 +- .../api/actions/indices/get_data_stream.rb | 4 +- .../api/actions/indices/get_field_mapping.rb | 16 +-- .../api/actions/indices/get_mapping.rb | 14 +- .../api/actions/indices/get_settings.rb | 24 ++-- .../api/actions/indices/get_template.rb | 8 +- .../actions/indices/migrate_to_data_stream.rb | 2 +- .../elasticsearch/api/actions/indices/open.rb | 24 ++-- .../api/actions/indices/put_alias.rb | 2 +- .../api/actions/indices/put_data_lifecycle.rb | 8 +- .../api/actions/indices/put_index_template.rb | 10 +- .../api/actions/indices/put_mapping.rb | 14 +- .../api/actions/indices/put_settings.rb | 24 ++-- .../api/actions/indices/put_template.rb | 2 +- .../api/actions/indices/recovery.rb | 8 +- .../api/actions/indices/refresh.rb | 16 +-- .../indices/reload_search_analyzers.rb | 6 +- .../api/actions/indices/resolve_cluster.rb | 48 +++---- .../api/actions/indices/resolve_index.rb 
| 12 +- .../api/actions/indices/rollover.rb | 20 +-- .../api/actions/indices/segments.rb | 12 +- .../api/actions/indices/shrink.rb | 4 +- .../api/actions/indices/split.rb | 12 +- .../api/actions/indices/stats.rb | 12 +- .../api/actions/indices/validate_query.rb | 28 ++-- .../inference/chat_completion_unified.rb | 6 +- .../api/actions/inference/put_alibabacloud.rb | 2 +- .../actions/inference/put_amazonbedrock.rb | 2 +- .../api/actions/inference/put_anthropic.rb | 4 +- .../actions/inference/put_azureaistudio.rb | 2 +- .../api/actions/inference/put_azureopenai.rb | 4 +- .../api/actions/inference/put_cohere.rb | 2 +- .../actions/inference/put_elasticsearch.rb | 4 +- .../api/actions/inference/put_elser.rb | 2 +- .../actions/inference/put_googleaistudio.rb | 2 +- .../actions/inference/put_googlevertexai.rb | 2 +- .../api/actions/inference/put_hugging_face.rb | 18 +-- .../api/actions/inference/put_jinaai.rb | 6 +- .../api/actions/inference/put_mistral.rb | 4 +- .../api/actions/inference/put_openai.rb | 4 +- .../api/actions/inference/put_voyageai.rb | 2 +- .../api/actions/inference/put_watsonx.rb | 6 +- .../actions/inference/stream_completion.rb | 2 +- .../api/actions/inference/update.rb | 2 +- .../ingest/delete_ip_location_database.rb | 4 +- .../api/actions/ingest/delete_pipeline.rb | 2 +- .../api/actions/ingest/get_geoip_database.rb | 4 +- .../ingest/get_ip_location_database.rb | 6 +- .../api/actions/ingest/get_pipeline.rb | 4 +- .../ingest/put_ip_location_database.rb | 4 +- .../api/actions/ingest/simulate.rb | 4 +- .../elasticsearch/api/actions/license/get.rb | 4 +- .../api/actions/license/post_start_basic.rb | 2 +- .../api/actions/machine_learning/close_job.rb | 8 +- .../delete_data_frame_analytics.rb | 2 +- .../machine_learning/delete_expired_data.rb | 4 +- .../machine_learning/delete_forecast.rb | 6 +- .../machine_learning/delete_model_snapshot.rb | 2 +- .../delete_trained_model_alias.rb | 2 +- .../api/actions/machine_learning/flush_job.rb | 4 +- .../api/actions/machine_learning/forecast.rb | 4 +- .../actions/machine_learning/get_buckets.rb | 8 +- .../machine_learning/get_calendar_events.rb | 4 +- .../actions/machine_learning/get_calendars.rb | 2 +- .../get_data_frame_analytics.rb | 4 +- .../get_data_frame_analytics_stats.rb | 4 +- .../machine_learning/get_datafeed_stats.rb | 14 +- .../actions/machine_learning/get_datafeeds.rb | 12 +- .../machine_learning/get_influencers.rb | 4 +- .../actions/machine_learning/get_job_stats.rb | 4 +- .../api/actions/machine_learning/get_jobs.rb | 6 +- .../machine_learning/get_memory_stats.rb | 4 +- .../get_model_snapshot_upgrade_stats.rb | 4 +- .../machine_learning/get_model_snapshots.rb | 4 +- .../machine_learning/get_overall_buckets.rb | 36 ++--- .../actions/machine_learning/get_records.rb | 2 +- .../put_data_frame_analytics.rb | 2 +- .../actions/machine_learning/put_datafeed.rb | 18 ++- .../actions/machine_learning/put_filter.rb | 2 +- .../api/actions/machine_learning/put_job.rb | 22 +-- .../machine_learning/put_trained_model.rb | 2 +- .../put_trained_model_definition_part.rb | 2 +- .../put_trained_model_vocabulary.rb | 2 +- .../machine_learning/revert_model_snapshot.rb | 2 +- .../machine_learning/set_upgrade_mode.rb | 2 +- .../start_data_frame_analytics.rb | 2 +- .../machine_learning/start_datafeed.rb | 14 +- .../start_trained_model_deployment.rb | 4 +- .../actions/machine_learning/stop_datafeed.rb | 10 +- .../stop_trained_model_deployment.rb | 4 +- .../machine_learning/update_datafeed.rb | 18 +-- .../lib/elasticsearch/api/actions/mget.rb | 26 ++-- 
.../lib/elasticsearch/api/actions/msearch.rb | 26 ++-- .../api/actions/msearch_template.rb | 19 +-- .../elasticsearch/api/actions/mtermvectors.rb | 18 +-- .../clear_repositories_metering_archive.rb | 2 +- .../elasticsearch/api/actions/nodes/stats.rb | 2 +- .../elasticsearch/api/actions/nodes/usage.rb | 4 +- .../api/actions/open_point_in_time.rb | 22 +-- .../elasticsearch/api/actions/put_script.rb | 4 +- .../api/actions/query_rules/put_ruleset.rb | 4 +- .../elasticsearch/api/actions/rank_eval.rb | 8 +- .../lib/elasticsearch/api/actions/reindex.rb | 124 ++++++++-------- .../api/actions/reindex_rethrottle.rb | 8 +- .../api/actions/render_search_template.rb | 2 +- .../lib/elasticsearch/api/actions/scroll.rb | 8 +- .../lib/elasticsearch/api/actions/search.rb | 118 +++++++-------- .../api/actions/search_application/put.rb | 2 +- .../search_application/render_query.rb | 4 +- .../elasticsearch/api/actions/search_mvt.rb | 54 +++---- .../api/actions/search_shards.rb | 22 +-- .../api/actions/search_template.rb | 26 ++-- .../searchable_snapshots/clear_cache.rb | 4 +- .../api/actions/searchable_snapshots/mount.rb | 2 +- .../actions/security/activate_user_profile.rb | 8 +- .../api/actions/security/bulk_delete_role.rb | 2 +- .../api/actions/security/bulk_put_role.rb | 2 +- .../actions/security/bulk_update_api_keys.rb | 4 +- .../api/actions/security/change_password.rb | 2 +- .../actions/security/clear_api_key_cache.rb | 2 +- .../security/clear_cached_privileges.rb | 2 +- .../actions/security/clear_cached_realms.rb | 2 +- .../actions/security/clear_cached_roles.rb | 2 +- .../security/clear_cached_service_tokens.rb | 8 +- .../api/actions/security/create_api_key.rb | 2 +- .../security/create_cross_cluster_api_key.rb | 6 +- .../actions/security/create_service_token.rb | 6 +- .../api/actions/security/delegate_pki.rb | 4 +- .../api/actions/security/delete_privileges.rb | 4 +- .../api/actions/security/delete_role.rb | 2 +- .../actions/security/delete_role_mapping.rb | 2 +- .../actions/security/delete_service_token.rb | 2 +- .../api/actions/security/delete_user.rb | 2 +- .../api/actions/security/disable_user.rb | 2 +- .../api/actions/security/enable_user.rb | 2 +- .../api/actions/security/get_api_key.rb | 16 +-- .../api/actions/security/get_privileges.rb | 2 +- .../actions/security/get_service_accounts.rb | 6 +- .../security/get_service_credentials.rb | 4 +- .../api/actions/security/get_settings.rb | 6 +- .../api/actions/security/get_token.rb | 4 +- .../api/actions/security/get_user_profile.rb | 8 +- .../actions/security/invalidate_api_key.rb | 16 +-- .../api/actions/security/invalidate_token.rb | 6 +- .../api/actions/security/oidc_logout.rb | 2 +- .../api/actions/security/put_privileges.rb | 12 +- .../api/actions/security/put_role.rb | 2 +- .../api/actions/security/put_role_mapping.rb | 8 +- .../api/actions/security/put_user.rb | 2 +- .../api/actions/security/query_api_keys.rb | 10 +- .../api/actions/security/saml_authenticate.rb | 2 +- .../api/actions/security/saml_invalidate.rb | 2 +- .../security/saml_prepare_authentication.rb | 6 +- .../actions/security/suggest_user_profiles.rb | 10 +- .../api/actions/security/update_api_key.rb | 8 +- .../security/update_cross_cluster_api_key.rb | 6 +- .../api/actions/security/update_settings.rb | 4 +- .../security/update_user_profile_data.rb | 8 +- .../actions/snapshot/cleanup_repository.rb | 4 +- .../api/actions/snapshot/clone.rb | 2 +- .../api/actions/snapshot/create.rb | 4 +- .../api/actions/snapshot/create_repository.rb | 10 +- .../api/actions/snapshot/delete.rb | 4 +- 
.../api/actions/snapshot/delete_repository.rb | 6 +- .../elasticsearch/api/actions/snapshot/get.rb | 36 ++--- .../api/actions/snapshot/get_repository.rb | 8 +- .../actions/snapshot/repository_analyze.rb | 18 +-- .../api/actions/snapshot/restore.rb | 12 +- .../api/actions/snapshot/status.rb | 14 +- .../api/actions/snapshot/verify_repository.rb | 4 +- .../get_status.rb | 4 +- .../put_lifecycle.rb | 4 +- .../snapshot_lifecycle_management/start.rb | 4 +- .../snapshot_lifecycle_management/stop.rb | 4 +- .../api/actions/sql/delete_async.rb | 2 +- .../api/actions/sql/get_async.rb | 4 +- .../elasticsearch/api/actions/sql/query.rb | 4 +- .../api/actions/sql/translate.rb | 2 +- .../api/actions/ssl/certificates.rb | 4 +- .../actions/synonyms/delete_synonym_rule.rb | 4 +- .../api/actions/synonyms/put_synonym.rb | 4 +- .../api/actions/synonyms/put_synonym_rule.rb | 4 +- .../elasticsearch/api/actions/tasks/cancel.rb | 2 +- .../elasticsearch/api/actions/tasks/get.rb | 2 +- .../elasticsearch/api/actions/tasks/list.rb | 30 ++-- .../elasticsearch/api/actions/terms_enum.rb | 4 +- .../elasticsearch/api/actions/termvectors.rb | 30 ++-- .../text_structure/find_field_structure.rb | 82 +++++------ .../text_structure/find_message_structure.rb | 82 +++++------ .../actions/text_structure/find_structure.rb | 88 ++++++------ .../text_structure/test_grok_pattern.rb | 2 +- .../api/actions/transform/get_transform.rb | 4 +- .../actions/transform/get_transform_stats.rb | 4 +- .../api/actions/transform/put_transform.rb | 14 +- .../api/actions/transform/reset_transform.rb | 4 +- .../transform/schedule_now_transform.rb | 2 +- .../api/actions/transform/start_transform.rb | 6 +- .../api/actions/transform/stop_transform.rb | 6 +- .../api/actions/transform/update_transform.rb | 6 +- .../lib/elasticsearch/api/actions/update.rb | 14 +- .../api/actions/update_by_query.rb | 106 +++++++------- .../api/actions/update_by_query_rethrottle.rb | 2 +- .../api/actions/watcher/ack_watch.rb | 4 +- .../api/actions/watcher/delete_watch.rb | 6 +- .../api/actions/watcher/execute_watch.rb | 2 +- .../api/actions/watcher/get_settings.rb | 4 +- .../api/actions/watcher/put_watch.rb | 12 +- .../api/actions/watcher/query_watches.rb | 2 +- .../elasticsearch/api/actions/watcher/stop.rb | 2 +- .../api/actions/watcher/update_settings.rb | 10 +- .../elasticsearch/api/actions/xpack/info.rb | 2 +- .../elasticsearch/api/actions/xpack/usage.rb | 2 +- 324 files changed, 1813 insertions(+), 1744 deletions(-) diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/async_search/delete.rb b/elasticsearch-api/lib/elasticsearch/api/actions/async_search/delete.rb index 940a47498d..63b762f5cc 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/async_search/delete.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/async_search/delete.rb @@ -25,7 +25,7 @@ module Actions # Delete an async search. # If the asynchronous search is still running, it is cancelled. # Otherwise, the saved search results are deleted. - # If the Elasticsearch security features are enabled, the deletion of a specific async search is restricted to: the authenticated user that submitted the original search request; users that have the +cancel_task+ cluster privilege. + # If the Elasticsearch security features are enabled, the deletion of a specific async search is restricted to: the authenticated user that submitted the original search request; users that have the `cancel_task` cluster privilege. # # @option arguments [String] :id A unique identifier for the async search. 
(*Required*) # @option arguments [Hash] :headers Custom HTTP headers diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/async_search/get.rb b/elasticsearch-api/lib/elasticsearch/api/actions/async_search/get.rb index 93e982c95e..9e5ea2750b 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/async_search/get.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/async_search/get.rb @@ -28,7 +28,7 @@ module Actions # # @option arguments [String] :id A unique identifier for the async search. (*Required*) # @option arguments [Time] :keep_alive The length of time that the async search should be available in the cluster. - # When not specified, the +keep_alive+ set with the corresponding submit async request will be used. + # When not specified, the `keep_alive` set with the corresponding submit async request will be used. # Otherwise, it is possible to override the value and extend the validity of the request. # When this period expires, the search, if still running, is cancelled. # If the search is completed, its saved results are deleted. diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/async_search/status.rb b/elasticsearch-api/lib/elasticsearch/api/actions/async_search/status.rb index 67c4c0f650..76d5c9e890 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/async_search/status.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/async_search/status.rb @@ -26,7 +26,7 @@ module Actions # Get the status of a previously submitted async search request given its identifier, without retrieving search results. # If the Elasticsearch security features are enabled, the access to the status of a specific async search is restricted to: # * The user or API key that submitted the original async search request. - # * Users that have the +monitor+ cluster privilege or greater privileges. + # * Users that have the `monitor` cluster privilege or greater privileges. # # @option arguments [String] :id A unique identifier for the async search. (*Required*) # @option arguments [Time] :keep_alive The length of time that the async search needs to be available. diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/async_search/submit.rb b/elasticsearch-api/lib/elasticsearch/api/actions/async_search/submit.rb index 546cf0714c..5faec08413 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/async_search/submit.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/async_search/submit.rb @@ -26,15 +26,15 @@ module Actions # When the primary sort of the results is an indexed field, shards get sorted based on minimum and maximum value that they hold for that field. Partial results become available following the sort criteria that was requested. # Warning: Asynchronous search does not support scroll or search requests that include only the suggest section. # By default, Elasticsearch does not allow you to store an async search response larger than 10Mb and an attempt to do this results in an error. - # The maximum allowed size for a stored async search response can be set by changing the +search.max_async_search_response_size+ cluster level setting. + # The maximum allowed size for a stored async search response can be set by changing the `search.max_async_search_response_size` cluster level setting. 
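A minimal sketch of the submit/poll flow described above, assuming a placeholder index name, query, and connection details:

```ruby
require 'elasticsearch'

client = Elasticsearch::Client.new(url: 'http://localhost:9200') # placeholder connection

# Submit an async search; results are stored so they can be fetched later.
response = client.async_search.submit(
  index: 'my-index',                  # placeholder index name
  wait_for_completion_timeout: '2s',  # return within 2s even if the search is still running
  keep_on_completion: true,           # keep results even if they arrive within the timeout
  body: { query: { match: { title: 'elasticsearch' } } }
)

# If an id is present, the search can be polled and its stored results fetched later.
if response['id']
  client.async_search.status(id: response['id'])
  client.async_search.get(id: response['id'], keep_alive: '1m')
end
```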
# - # @option arguments [String, Array] :index A comma-separated list of index names to search; use +_all+ or empty string to perform the operation on all indices + # @option arguments [String, Array] :index A comma-separated list of index names to search; use `_all` or empty string to perform the operation on all indices # @option arguments [Time] :wait_for_completion_timeout Blocks and waits until the search is completed up to a certain timeout. # When the async search completes within the timeout, the response won’t include the ID as the results are not stored in the cluster. Server default: 1s. # @option arguments [Time] :keep_alive Specifies how long the async search needs to be available. # Ongoing async searches and any saved search results are deleted after this period. Server default: 5d. - # @option arguments [Boolean] :keep_on_completion If +true+, results are stored for later retrieval when the search completes within the +wait_for_completion_timeout+. - # @option arguments [Boolean] :allow_no_indices Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes +_all+ string or when no indices have been specified) + # @option arguments [Boolean] :keep_on_completion If `true`, results are stored for later retrieval when the search completes within the `wait_for_completion_timeout`. + # @option arguments [Boolean] :allow_no_indices Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) # @option arguments [Boolean] :allow_partial_search_results Indicate if an error should be returned if there is a partial search failure or timeout # @option arguments [String] :analyzer The analyzer to use for the query string # @option arguments [Boolean] :analyze_wildcard Specify whether wildcard and prefix queries should be analyzed (default: false) diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/bulk.rb b/elasticsearch-api/lib/elasticsearch/api/actions/bulk.rb index 868854b915..f54ace9ece 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/bulk.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/bulk.rb @@ -22,40 +22,48 @@ module Elasticsearch module API module Actions # Bulk index or delete documents. - # Perform multiple +index+, +create+, +delete+, and +update+ actions in a single request. + # Perform multiple `index`, `create`, `delete`, and `update` actions in a single request. # This reduces overhead and can greatly increase indexing speed. # If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias: - # * To use the +create+ action, you must have the +create_doc+, +create+, +index+, or +write+ index privilege. Data streams support only the +create+ action. - # * To use the +index+ action, you must have the +create+, +index+, or +write+ index privilege. - # * To use the +delete+ action, you must have the +delete+ or +write+ index privilege. - # * To use the +update+ action, you must have the +index+ or +write+ index privilege. - # * To automatically create a data stream or index with a bulk API request, you must have the +auto_configure+, +create_index+, or +manage+ index privilege. - # * To make the result of a bulk operation visible to search using the +refresh+ parameter, you must have the +maintenance+ or +manage+ index privilege. 
+ # * To use the `create` action, you must have the `create_doc`, `create`, `index`, or `write` index privilege. Data streams support only the `create` action. + # * To use the `index` action, you must have the `create`, `index`, or `write` index privilege. + # * To use the `delete` action, you must have the `delete` or `write` index privilege. + # * To use the `update` action, you must have the `index` or `write` index privilege. + # * To automatically create a data stream or index with a bulk API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege. + # * To make the result of a bulk operation visible to search using the `refresh` parameter, you must have the `maintenance` or `manage` index privilege. # Automatic data stream creation requires a matching index template with data stream enabled. # The actions are specified in the request body using a newline delimited JSON (NDJSON) structure: - # + - # action_and_meta_data\n - # optional_source\n - # action_and_meta_data\n - # optional_source\n + # + # ``` + # action_and_meta_data + # + # optional_source + # + # action_and_meta_data + # + # optional_source + # # .... - # action_and_meta_data\n - # optional_source\n - # + - # The +index+ and +create+ actions expect a source on the next line and have the same semantics as the +op_type+ parameter in the standard index API. - # A +create+ action fails if a document with the same ID already exists in the target - # An +index+ action adds or replaces a document as necessary. - # NOTE: Data streams support only the +create+ action. + # action_and_meta_data + # + # optional_source + # + # ``` + # + # The `index` and `create` actions expect a source on the next line and have the same semantics as the `op_type` parameter in the standard index API. + # A `create` action fails if a document with the same ID already exists in the target + # An `index` action adds or replaces a document as necessary. + # NOTE: Data streams support only the `create` action. # To update or delete a document in a data stream, you must target the backing index containing the document. - # An +update+ action expects that the partial doc, upsert, and script and its options are specified on the next line. - # A +delete+ action does not expect a source on the next line and has the same semantics as the standard delete API. - # NOTE: The final line of data must end with a newline character (+\n+). - # Each newline character may be preceded by a carriage return (+\r+). - # When sending NDJSON data to the +_bulk+ endpoint, use a +Content-Type+ header of +application/json+ or +application/x-ndjson+. - # Because this format uses literal newline characters (+\n+) as delimiters, make sure that the JSON actions and sources are not pretty printed. - # If you provide a target in the request path, it is used for any actions that don't explicitly specify an +_index+ argument. + # An `update` action expects that the partial doc, upsert, and script and its options are specified on the next line. + # A `delete` action does not expect a source on the next line and has the same semantics as the standard delete API. + # NOTE: The final line of data must end with a newline character (`\n`). + # Each newline character may be preceded by a carriage return (`\r`). + # When sending NDJSON data to the `_bulk` endpoint, use a `Content-Type` header of `application/json` or `application/x-ndjson`. 
+ # Because this format uses literal newline characters (`\n`) as delimiters, make sure that the JSON actions and sources are not pretty printed. + # If you provide a target in the request path, it is used for any actions that don't explicitly specify an `_index` argument. # A note on the format: the idea here is to make processing as fast as possible. - # As some of the actions are redirected to other shards on other nodes, only +action_meta_data+ is parsed on the receiving node side. + # As some of the actions are redirected to other shards on other nodes, only `action_meta_data` is parsed on the receiving node side. # Client libraries using this protocol should try and strive to do something similar on the client side, and reduce buffering as much as possible. # There is no "correct" number of actions to perform in a single bulk request. # Experiment with different settings to find the optimal size for your particular workload. @@ -64,69 +72,71 @@ module Actions # For instance, split documents into pages or chapters before indexing them, or store raw binary data in a system outside Elasticsearch and replace the raw data with a link to the external system in the documents that you send to Elasticsearch. # **Client suppport for bulk requests** # Some of the officially supported clients provide helpers to assist with bulk requests and reindexing: - # * Go: Check out +esutil.BulkIndexer+ - # * Perl: Check out +Search::Elasticsearch::Client::5_0::Bulk+ and +Search::Elasticsearch::Client::5_0::Scroll+ - # * Python: Check out +elasticsearch.helpers.*+ - # * JavaScript: Check out +client.helpers.*+ - # * .NET: Check out +BulkAllObservable+ + # * Go: Check out `esutil.BulkIndexer` + # * Perl: Check out `Search::Elasticsearch::Client::5_0::Bulk` and `Search::Elasticsearch::Client::5_0::Scroll` + # * Python: Check out `elasticsearch.helpers.*` + # * JavaScript: Check out `client.helpers.*` + # * .NET: Check out `BulkAllObservable` # * PHP: Check out bulk indexing. # **Submitting bulk requests with cURL** - # If you're providing text file input to +curl+, you must use the +--data-binary+ flag instead of plain +-d+. + # If you're providing text file input to `curl`, you must use the `--data-binary` flag instead of plain `-d`. # The latter doesn't preserve newlines. For example: - # + + # + # ``` # $ cat requests # { "index" : { "_index" : "test", "_id" : "1" } } # { "field1" : "value1" } # $ curl -s -H "Content-Type: application/x-ndjson" -XPOST localhost:9200/_bulk --data-binary "@requests"; echo # {"took":7, "errors": false, "items":[{"index":{"_index":"test","_id":"1","_version":1,"result":"created","forced_refresh":false}}]} - # + + # ``` + # # **Optimistic concurrency control** - # Each +index+ and +delete+ action within a bulk API call may include the +if_seq_no+ and +if_primary_term+ parameters in their respective action and meta data lines. - # The +if_seq_no+ and +if_primary_term+ parameters control how operations are run, based on the last modification to existing documents. See Optimistic concurrency control for more details. + # Each `index` and `delete` action within a bulk API call may include the `if_seq_no` and `if_primary_term` parameters in their respective action and meta data lines. + # The `if_seq_no` and `if_primary_term` parameters control how operations are run, based on the last modification to existing documents. See Optimistic concurrency control for more details. # **Versioning** - # Each bulk item can include the version value using the +version+ field. 
- # It automatically follows the behavior of the index or delete operation based on the +_version+ mapping. - # It also support the +version_type+. + # Each bulk item can include the version value using the `version` field. + # It automatically follows the behavior of the index or delete operation based on the `_version` mapping. + # It also support the `version_type`. # **Routing** - # Each bulk item can include the routing value using the +routing+ field. - # It automatically follows the behavior of the index or delete operation based on the +_routing+ mapping. - # NOTE: Data streams do not support custom routing unless they were created with the +allow_custom_routing+ setting enabled in the template. + # Each bulk item can include the routing value using the `routing` field. + # It automatically follows the behavior of the index or delete operation based on the `_routing` mapping. + # NOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template. # **Wait for active shards** - # When making bulk calls, you can set the +wait_for_active_shards+ parameter to require a minimum number of shard copies to be active before starting to process the bulk request. + # When making bulk calls, you can set the `wait_for_active_shards` parameter to require a minimum number of shard copies to be active before starting to process the bulk request. # **Refresh** # Control when the changes made by this request are visible to search. # NOTE: Only the shards that receive the bulk request will be affected by refresh. - # Imagine a +_bulk?refresh=wait_for+ request with three documents in it that happen to be routed to different shards in an index with five shards. + # Imagine a `_bulk?refresh=wait_for` request with three documents in it that happen to be routed to different shards in an index with five shards. # The request will only wait for those three shards to refresh. - # The other two shards that make up the index do not participate in the +_bulk+ request at all. + # The other two shards that make up the index do not participate in the `_bulk` request at all. # # @option arguments [String] :index The name of the data stream, index, or index alias to perform bulk actions on. # @option arguments [Boolean] :include_source_on_error True or false if to include the document source in the error message in case of parsing errors. Server default: true. - # @option arguments [Boolean] :list_executed_pipelines If +true+, the response will include the ingest pipelines that were run for each index or create. + # @option arguments [Boolean] :list_executed_pipelines If `true`, the response will include the ingest pipelines that were run for each index or create. # @option arguments [String] :pipeline The pipeline identifier to use to preprocess incoming documents. - # If the index has a default ingest pipeline specified, setting the value to +_none+ turns off the default ingest pipeline for this request. + # If the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request. # If a final pipeline is configured, it will always run regardless of the value of this parameter. - # @option arguments [String] :refresh If +true+, Elasticsearch refreshes the affected shards to make this operation visible to search. - # If +wait_for+, wait for a refresh to make this operation visible to search. - # If +false+, do nothing with refreshes. - # Valid values: +true+, +false+, +wait_for+. 
Server default: false. + # @option arguments [String] :refresh If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. + # If `wait_for`, wait for a refresh to make this operation visible to search. + # If `false`, do nothing with refreshes. + # Valid values: `true`, `false`, `wait_for`. Server default: false. # @option arguments [String] :routing A custom value that is used to route operations to a specific shard. - # @option arguments [Boolean, String, Array] :_source Indicates whether to return the +_source+ field (+true+ or +false+) or contains a list of fields to return. + # @option arguments [Boolean, String, Array] :_source Indicates whether to return the `_source` field (`true` or `false`) or contains a list of fields to return. # @option arguments [String, Array] :_source_excludes A comma-separated list of source fields to exclude from the response. - # You can also use this parameter to exclude fields from the subset specified in +_source_includes+ query parameter. - # If the +_source+ parameter is +false+, this parameter is ignored. + # You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. + # If the `_source` parameter is `false`, this parameter is ignored. # @option arguments [String, Array] :_source_includes A comma-separated list of source fields to include in the response. # If this parameter is specified, only these source fields are returned. - # You can exclude fields from this subset using the +_source_excludes+ query parameter. - # If the +_source+ parameter is +false+, this parameter is ignored. + # You can exclude fields from this subset using the `_source_excludes` query parameter. + # If the `_source` parameter is `false`, this parameter is ignored. # @option arguments [Time] :timeout The period each action waits for the following operations: automatic index creation, dynamic mapping updates, and waiting for active shards. - # The default is +1m+ (one minute), which guarantees Elasticsearch waits for at least the timeout before failing. + # The default is `1m` (one minute), which guarantees Elasticsearch waits for at least the timeout before failing. # The actual wait time could be longer, particularly when multiple waits occur. Server default: 1m. # @option arguments [Integer, String] :wait_for_active_shards The number of shard copies that must be active before proceeding with the operation. - # Set to +all+ or any positive integer up to the total number of shards in the index (+number_of_replicas+1+). - # The default is +1+, which waits for each primary shard to be active. Server default: 1. - # @option arguments [Boolean] :require_alias If +true+, the request's actions must target an index alias. - # @option arguments [Boolean] :require_data_stream If +true+, the request's actions must target a data stream (existing or to be created). + # Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). + # The default is `1`, which waits for each primary shard to be active. Server default: 1. + # @option arguments [Boolean] :require_alias If `true`, the request's actions must target an index alias. + # @option arguments [Boolean] :require_data_stream If `true`, the request's actions must target a data stream (existing or to be created). # @option arguments [Hash] :headers Custom HTTP headers # @option arguments [String|Array] :body operations. 
Array of Strings, Header/Data pairs, or the conveniency "combined" format can be passed, refer to Elasticsearch::API::Utils.bulkify documentation. # diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cat/aliases.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cat/aliases.rb index 0211dbc973..dd755d24b9 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/cat/aliases.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/cat/aliases.rb @@ -27,22 +27,22 @@ module Actions # This API does not return data stream aliases. # IMPORTANT: CAT APIs are only intended for human consumption using the command line or the Kibana console. They are not intended for use by applications. For application consumption, use the aliases API. # - # @option arguments [String, Array] :name A comma-separated list of aliases to retrieve. Supports wildcards (+*+). To retrieve all aliases, omit this parameter or use +*+ or +_all+. + # @option arguments [String, Array] :name A comma-separated list of aliases to retrieve. Supports wildcards (`*`). To retrieve all aliases, omit this parameter or use `*` or `_all`. # @option arguments [String, Array] :h List of columns to appear in the response. Supports simple wildcards. # @option arguments [String, Array] :s List of columns that determine how the table should be sorted. - # Sorting defaults to ascending and can be changed by setting +:asc+ - # or +:desc+ as a suffix to the column name. + # Sorting defaults to ascending and can be changed by setting `:asc` + # or `:desc` as a suffix to the column name. # @option arguments [String, Array] :expand_wildcards The type of index that wildcard patterns can match. # If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. - # It supports comma-separated values, such as +open,hidden+. + # It supports comma-separated values, such as `open,hidden`. # @option arguments [Time] :master_timeout The period to wait for a connection to the master node. # If the master node is not available before the timeout expires, the request fails and returns an error. - # To indicated that the request should never timeout, you can set it to +-1+. Server default: 30s. + # To indicated that the request should never timeout, you can set it to `-1`. Server default: 30s. # @option arguments [String] :format Specifies the format to return the columnar data in, can be set to - # +text+, +json+, +cbor+, +yaml+, or +smile+. Server default: text. - # @option arguments [Boolean] :help When set to +true+ will output available columns. This option + # `text`, `json`, `cbor`, `yaml`, or `smile`. Server default: text. + # @option arguments [Boolean] :help When set to `true` will output available columns. This option # can't be combined with any other query string option. - # @option arguments [Boolean] :v When set to +true+ will enable verbose output. + # @option arguments [Boolean] :v When set to `true` will enable verbose output. 
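As a rough illustration of the bulk documentation updated above (not something this patch itself adds), the same operations might be submitted from the Ruby client along these lines; the index name and documents are made up:

```ruby
require 'elasticsearch'

client = Elasticsearch::Client.new(url: ENV.fetch('ELASTICSEARCH_URL', 'http://localhost:9200'))

# Header/data pairs, as described for the :body argument above.
response = client.bulk(
  refresh: 'wait_for', # block until a refresh makes these writes searchable
  body: [
    { index: { _index: 'books', _id: '1' } },
    { title: 'Snow Crash', author: 'Neal Stephenson' },
    { delete: { _index: 'books', _id: '2' } }
  ]
)

warn 'Some bulk items failed' if response['errors']
```

The action/metadata and source pairs mirror the newline-delimited lines described above, and `refresh: 'wait_for'` corresponds to the documented `refresh` behaviour.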
# @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-aliases diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cat/allocation.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cat/allocation.rb index b08f92074c..72d2816769 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/cat/allocation.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/cat/allocation.rb @@ -30,18 +30,18 @@ module Actions # @option arguments [String] :bytes The unit used to display byte values. # @option arguments [String, Array] :h List of columns to appear in the response. Supports simple wildcards. # @option arguments [String, Array] :s List of columns that determine how the table should be sorted. - # Sorting defaults to ascending and can be changed by setting +:asc+ - # or +:desc+ as a suffix to the column name. - # @option arguments [Boolean] :local If +true+, the request computes the list of selected nodes from the - # local cluster state. If +false+ the list of selected nodes are computed + # Sorting defaults to ascending and can be changed by setting `:asc` + # or `:desc` as a suffix to the column name. + # @option arguments [Boolean] :local If `true`, the request computes the list of selected nodes from the + # local cluster state. If `false` the list of selected nodes are computed # from the cluster state of the master node. In both cases the coordinating # node will send requests for further information to each selected node. # @option arguments [Time] :master_timeout Period to wait for a connection to the master node. Server default: 30s. # @option arguments [String] :format Specifies the format to return the columnar data in, can be set to - # +text+, +json+, +cbor+, +yaml+, or +smile+. Server default: text. - # @option arguments [Boolean] :help When set to +true+ will output available columns. This option + # `text`, `json`, `cbor`, `yaml`, or `smile`. Server default: text. + # @option arguments [Boolean] :help When set to `true` will output available columns. This option # can't be combined with any other query string option. - # @option arguments [Boolean] :v When set to +true+ will enable verbose output. + # @option arguments [Boolean] :v When set to `true` will enable verbose output. # @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-allocation diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cat/component_templates.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cat/component_templates.rb index 3d77c58d2c..1e28fdb0c2 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/cat/component_templates.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/cat/component_templates.rb @@ -33,18 +33,18 @@ module Actions # If it is omitted, all component templates are returned. # @option arguments [String, Array] :h List of columns to appear in the response. Supports simple wildcards. # @option arguments [String, Array] :s List of columns that determine how the table should be sorted. - # Sorting defaults to ascending and can be changed by setting +:asc+ - # or +:desc+ as a suffix to the column name. - # @option arguments [Boolean] :local If +true+, the request computes the list of selected nodes from the - # local cluster state. 
If +false+ the list of selected nodes are computed + # Sorting defaults to ascending and can be changed by setting `:asc` + # or `:desc` as a suffix to the column name. + # @option arguments [Boolean] :local If `true`, the request computes the list of selected nodes from the + # local cluster state. If `false` the list of selected nodes are computed # from the cluster state of the master node. In both cases the coordinating # node will send requests for further information to each selected node. # @option arguments [Time] :master_timeout The period to wait for a connection to the master node. Server default: 30s. # @option arguments [String] :format Specifies the format to return the columnar data in, can be set to - # +text+, +json+, +cbor+, +yaml+, or +smile+. Server default: text. - # @option arguments [Boolean] :help When set to +true+ will output available columns. This option + # `text`, `json`, `cbor`, `yaml`, or `smile`. Server default: text. + # @option arguments [Boolean] :help When set to `true` will output available columns. This option # can't be combined with any other query string option. - # @option arguments [Boolean] :v When set to +true+ will enable verbose output. + # @option arguments [Boolean] :v When set to `true` will enable verbose output. # @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-component-templates diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cat/count.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cat/count.rb index 3bd4eb3fd2..4d712f33d7 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/cat/count.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/cat/count.rb @@ -29,17 +29,17 @@ module Actions # They are not intended for use by applications. For application consumption, use the count API. # # @option arguments [String, Array] :index A comma-separated list of data streams, indices, and aliases used to limit the request. - # It supports wildcards (+*+). - # To target all data streams and indices, omit this parameter or use +*+ or +_all+. + # It supports wildcards (`*`). + # To target all data streams and indices, omit this parameter or use `*` or `_all`. # @option arguments [String, Array] :h List of columns to appear in the response. Supports simple wildcards. # @option arguments [String, Array] :s List of columns that determine how the table should be sorted. - # Sorting defaults to ascending and can be changed by setting +:asc+ - # or +:desc+ as a suffix to the column name. + # Sorting defaults to ascending and can be changed by setting `:asc` + # or `:desc` as a suffix to the column name. # @option arguments [String] :format Specifies the format to return the columnar data in, can be set to - # +text+, +json+, +cbor+, +yaml+, or +smile+. Server default: text. - # @option arguments [Boolean] :help When set to +true+ will output available columns. This option + # `text`, `json`, `cbor`, `yaml`, or `smile`. Server default: text. + # @option arguments [Boolean] :help When set to `true` will output available columns. This option # can't be combined with any other query string option. - # @option arguments [Boolean] :v When set to +true+ will enable verbose output. + # @option arguments [Boolean] :v When set to `true` will enable verbose output. 
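For illustration only, the shared cat-API query parameters documented in these hunks (`h`, `s`, `v`, `format`) might be exercised from the Ruby client roughly as follows; the alias pattern and column names are assumptions:

```ruby
require 'elasticsearch'

client = Elasticsearch::Client.new(url: 'http://localhost:9200')

# Human-readable table: pick columns with `h`, sort with `s`, show headers with `v`.
puts client.cat.aliases(name: 'logs-*', h: 'alias,index,is_write_index', s: 'index', v: true)

# The same data as structured JSON, which is easier to post-process in scripts.
aliases = client.cat.aliases(name: 'logs-*', format: 'json')
```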
# @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-count diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cat/fielddata.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cat/fielddata.rb index 5a08c6d8ec..76a9e992e8 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/cat/fielddata.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/cat/fielddata.rb @@ -32,13 +32,13 @@ module Actions # @option arguments [String] :bytes The unit used to display byte values. # @option arguments [String, Array] :h List of columns to appear in the response. Supports simple wildcards. # @option arguments [String, Array] :s List of columns that determine how the table should be sorted. - # Sorting defaults to ascending and can be changed by setting +:asc+ - # or +:desc+ as a suffix to the column name. + # Sorting defaults to ascending and can be changed by setting `:asc` + # or `:desc` as a suffix to the column name. # @option arguments [String] :format Specifies the format to return the columnar data in, can be set to - # +text+, +json+, +cbor+, +yaml+, or +smile+. Server default: text. - # @option arguments [Boolean] :help When set to +true+ will output available columns. This option + # `text`, `json`, `cbor`, `yaml`, or `smile`. Server default: text. + # @option arguments [Boolean] :help When set to `true` will output available columns. This option # can't be combined with any other query string option. - # @option arguments [Boolean] :v When set to +true+ will enable verbose output. + # @option arguments [Boolean] :v When set to `true` will enable verbose output. # @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-fielddata diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cat/health.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cat/health.rb index 241b38e607..83597bada4 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/cat/health.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/cat/health.rb @@ -27,23 +27,23 @@ module Actions # They are not intended for use by applications. For application consumption, use the cluster health API. # This API is often used to check malfunctioning clusters. # To help you track cluster health alongside log files and alerting systems, the API returns timestamps in two formats: - # +HH:MM:SS+, which is human-readable but includes no date information; - # +Unix epoch time+, which is machine-sortable and includes date information. + # `HH:MM:SS`, which is human-readable but includes no date information; + # `Unix epoch time`, which is machine-sortable and includes date information. # The latter format is useful for cluster recoveries that take multiple days. # You can use the cat health API to verify cluster health across multiple nodes. # You also can use the API to track the recovery of a large cluster over a longer period of time. # # @option arguments [String] :time The unit used to display time values. - # @option arguments [Boolean] :ts If true, returns +HH:MM:SS+ and Unix epoch timestamps. Server default: true. + # @option arguments [Boolean] :ts If true, returns `HH:MM:SS` and Unix epoch timestamps. Server default: true. # @option arguments [String, Array] :h List of columns to appear in the response. Supports simple wildcards. # @option arguments [String, Array] :s List of columns that determine how the table should be sorted. 
- # Sorting defaults to ascending and can be changed by setting +:asc+ - # or +:desc+ as a suffix to the column name. + # Sorting defaults to ascending and can be changed by setting `:asc` + # or `:desc` as a suffix to the column name. # @option arguments [String] :format Specifies the format to return the columnar data in, can be set to - # +text+, +json+, +cbor+, +yaml+, or +smile+. Server default: text. - # @option arguments [Boolean] :help When set to +true+ will output available columns. This option + # `text`, `json`, `cbor`, `yaml`, or `smile`. Server default: text. + # @option arguments [Boolean] :help When set to `true` will output available columns. This option # can't be combined with any other query string option. - # @option arguments [Boolean] :v When set to +true+ will enable verbose output. + # @option arguments [Boolean] :v When set to `true` will enable verbose output. # @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-health diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cat/indices.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cat/indices.rb index 993d292105..ab561660c2 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/cat/indices.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/cat/indices.rb @@ -36,7 +36,7 @@ module Actions # They are not intended for use by applications. For application consumption, use an index endpoint. # # @option arguments [String, Array] :index Comma-separated list of data streams, indices, and aliases used to limit the request. - # Supports wildcards (+*+). To target all data streams and indices, omit this parameter or use +*+ or +_all+. + # Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. # @option arguments [String] :bytes The unit used to display byte values. # @option arguments [String, Array] :expand_wildcards The type of index that wildcard patterns can match. # @option arguments [String] :health The health status used to limit returned indices. By default, the response includes indices of any health status. @@ -46,13 +46,13 @@ module Actions # @option arguments [Time] :master_timeout Period to wait for a connection to the master node. Server default: 30s. # @option arguments [String, Array] :h List of columns to appear in the response. Supports simple wildcards. # @option arguments [String, Array] :s List of columns that determine how the table should be sorted. - # Sorting defaults to ascending and can be changed by setting +:asc+ - # or +:desc+ as a suffix to the column name. + # Sorting defaults to ascending and can be changed by setting `:asc` + # or `:desc` as a suffix to the column name. # @option arguments [String] :format Specifies the format to return the columnar data in, can be set to - # +text+, +json+, +cbor+, +yaml+, or +smile+. Server default: text. - # @option arguments [Boolean] :help When set to +true+ will output available columns. This option + # `text`, `json`, `cbor`, `yaml`, or `smile`. Server default: text. + # @option arguments [Boolean] :help When set to `true` will output available columns. This option # can't be combined with any other query string option. - # @option arguments [Boolean] :v When set to +true+ will enable verbose output. + # @option arguments [Boolean] :v When set to `true` will enable verbose output. 
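Purely as a sketch of the `cat.health` and `cat.indices` parameters documented above; the index pattern is invented:

```ruby
require 'elasticsearch'

client = Elasticsearch::Client.new(url: 'http://localhost:9200')

# Cluster health without the HH:MM:SS / epoch timestamp columns.
puts client.cat.health(ts: false, v: true)

# Only yellow indices matching a pattern, largest store size first.
puts client.cat.indices(index: 'logs-*', health: 'yellow', s: 'store.size:desc', v: true)
```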
# @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-indices diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cat/master.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cat/master.rb index 68c3f8d712..37c24c7b5e 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/cat/master.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/cat/master.rb @@ -28,18 +28,18 @@ module Actions # # @option arguments [String, Array] :h List of columns to appear in the response. Supports simple wildcards. # @option arguments [String, Array] :s List of columns that determine how the table should be sorted. - # Sorting defaults to ascending and can be changed by setting +:asc+ - # or +:desc+ as a suffix to the column name. - # @option arguments [Boolean] :local If +true+, the request computes the list of selected nodes from the - # local cluster state. If +false+ the list of selected nodes are computed + # Sorting defaults to ascending and can be changed by setting `:asc` + # or `:desc` as a suffix to the column name. + # @option arguments [Boolean] :local If `true`, the request computes the list of selected nodes from the + # local cluster state. If `false` the list of selected nodes are computed # from the cluster state of the master node. In both cases the coordinating # node will send requests for further information to each selected node. # @option arguments [Time] :master_timeout Period to wait for a connection to the master node. Server default: 30s. # @option arguments [String] :format Specifies the format to return the columnar data in, can be set to - # +text+, +json+, +cbor+, +yaml+, or +smile+. Server default: text. - # @option arguments [Boolean] :help When set to +true+ will output available columns. This option + # `text`, `json`, `cbor`, `yaml`, or `smile`. Server default: text. + # @option arguments [Boolean] :help When set to `true` will output available columns. This option # can't be combined with any other query string option. - # @option arguments [Boolean] :v When set to +true+ will enable verbose output. + # @option arguments [Boolean] :v When set to `true` will enable verbose output. # @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-master diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cat/ml_data_frame_analytics.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cat/ml_data_frame_analytics.rb index f4744c521e..c278497718 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/cat/ml_data_frame_analytics.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/cat/ml_data_frame_analytics.rb @@ -29,17 +29,17 @@ module Actions # application consumption, use the get data frame analytics jobs statistics API. # # @option arguments [String] :id The ID of the data frame analytics to fetch - # @option arguments [Boolean] :allow_no_match Whether to ignore if a wildcard expression matches no configs. (This includes +_all+ string or when no configs have been specified) + # @option arguments [Boolean] :allow_no_match Whether to ignore if a wildcard expression matches no configs. (This includes `_all` string or when no configs have been specified) # @option arguments [String] :bytes The unit in which to display byte values # @option arguments [String, Array] :h Comma-separated list of column names to display. Server default: create_time,id,state,type. 
# @option arguments [String, Array] :s Comma-separated list of column names or column aliases used to sort the # response. # @option arguments [String] :time Unit used to display time values. # @option arguments [String] :format Specifies the format to return the columnar data in, can be set to - # +text+, +json+, +cbor+, +yaml+, or +smile+. Server default: text. - # @option arguments [Boolean] :help When set to +true+ will output available columns. This option + # `text`, `json`, `cbor`, `yaml`, or `smile`. Server default: text. + # @option arguments [Boolean] :help When set to `true` will output available columns. This option # can't be combined with any other query string option. - # @option arguments [Boolean] :v When set to +true+ will enable verbose output. + # @option arguments [Boolean] :v When set to `true` will enable verbose output. # @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-data-frame-analytics diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cat/ml_datafeeds.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cat/ml_datafeeds.rb index 4e54293420..fc841fa459 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/cat/ml_datafeeds.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/cat/ml_datafeeds.rb @@ -25,7 +25,7 @@ module Actions # Get datafeeds. # Get configuration and usage information about datafeeds. # This API returns a maximum of 10,000 datafeeds. - # If the Elasticsearch security features are enabled, you must have +monitor_ml+, +monitor+, +manage_ml+, or +manage+ + # If the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` # cluster privileges to use this API. # IMPORTANT: CAT APIs are only intended for human consumption using the Kibana # console or command line. They are not intended for use by applications. For @@ -34,19 +34,19 @@ module Actions # @option arguments [String] :datafeed_id A numerical character string that uniquely identifies the datafeed. # @option arguments [Boolean] :allow_no_match Specifies what to do when the request: # - Contains wildcard expressions and there are no datafeeds that match. - # - Contains the +_all+ string or no identifiers and there are no matches. + # - Contains the `_all` string or no identifiers and there are no matches. # - Contains wildcard expressions and there are only partial matches. - # If +true+, the API returns an empty datafeeds array when there are no matches and the subset of results when - # there are partial matches. If +false+, the API returns a 404 status code when there are no matches or only + # If `true`, the API returns an empty datafeeds array when there are no matches and the subset of results when + # there are partial matches. If `false`, the API returns a 404 status code when there are no matches or only # partial matches. Server default: true. # @option arguments [String, Array] :h Comma-separated list of column names to display. Server default: ['bc', 'id', 'sc', 's']. # @option arguments [String, Array] :s Comma-separated list of column names or column aliases used to sort the response. # @option arguments [String] :time The unit used to display time values. # @option arguments [String] :format Specifies the format to return the columnar data in, can be set to - # +text+, +json+, +cbor+, +yaml+, or +smile+. Server default: text. - # @option arguments [Boolean] :help When set to +true+ will output available columns. 
This option + # `text`, `json`, `cbor`, `yaml`, or `smile`. Server default: text. + # @option arguments [Boolean] :help When set to `true` will output available columns. This option # can't be combined with any other query string option. - # @option arguments [Boolean] :v When set to +true+ will enable verbose output. + # @option arguments [Boolean] :v When set to `true` will enable verbose output. # @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-datafeeds diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cat/ml_jobs.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cat/ml_jobs.rb index cf4bdfe666..efd4347bc2 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/cat/ml_jobs.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/cat/ml_jobs.rb @@ -25,8 +25,8 @@ module Actions # Get anomaly detection jobs. # Get configuration and usage information for anomaly detection jobs. # This API returns a maximum of 10,000 jobs. - # If the Elasticsearch security features are enabled, you must have +monitor_ml+, - # +monitor+, +manage_ml+, or +manage+ cluster privileges to use this API. + # If the Elasticsearch security features are enabled, you must have `monitor_ml`, + # `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. # IMPORTANT: CAT APIs are only intended for human consumption using the Kibana # console or command line. They are not intended for use by applications. For # application consumption, use the get anomaly detection job statistics API. @@ -34,20 +34,20 @@ module Actions # @option arguments [String] :job_id Identifier for the anomaly detection job. # @option arguments [Boolean] :allow_no_match Specifies what to do when the request: # - Contains wildcard expressions and there are no jobs that match. - # - Contains the +_all+ string or no identifiers and there are no matches. + # - Contains the `_all` string or no identifiers and there are no matches. # - Contains wildcard expressions and there are only partial matches. - # If +true+, the API returns an empty jobs array when there are no matches and the subset of results when there - # are partial matches. If +false+, the API returns a 404 status code when there are no matches or only partial + # If `true`, the API returns an empty jobs array when there are no matches and the subset of results when there + # are partial matches. If `false`, the API returns a 404 status code when there are no matches or only partial # matches. Server default: true. # @option arguments [String] :bytes The unit used to display byte values. # @option arguments [String, Array] :h Comma-separated list of column names to display. Server default: buckets.count,data.processed_records,forecasts.total,id,model.bytes,model.memory_status,state. # @option arguments [String, Array] :s Comma-separated list of column names or column aliases used to sort the response. # @option arguments [String] :time The unit used to display time values. # @option arguments [String] :format Specifies the format to return the columnar data in, can be set to - # +text+, +json+, +cbor+, +yaml+, or +smile+. Server default: text. - # @option arguments [Boolean] :help When set to +true+ will output available columns. This option + # `text`, `json`, `cbor`, `yaml`, or `smile`. Server default: text. + # @option arguments [Boolean] :help When set to `true` will output available columns. This option # can't be combined with any other query string option. 
- # @option arguments [Boolean] :v When set to +true+ will enable verbose output. + # @option arguments [Boolean] :v When set to `true` will enable verbose output. # @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-jobs diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cat/ml_trained_models.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cat/ml_trained_models.rb index 4d4401af64..6e7f3ca2d8 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/cat/ml_trained_models.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/cat/ml_trained_models.rb @@ -29,9 +29,9 @@ module Actions # application consumption, use the get trained models statistics API. # # @option arguments [String] :model_id A unique identifier for the trained model. - # @option arguments [Boolean] :allow_no_match Specifies what to do when the request: contains wildcard expressions and there are no models that match; contains the +_all+ string or no identifiers and there are no matches; contains wildcard expressions and there are only partial matches. - # If +true+, the API returns an empty array when there are no matches and the subset of results when there are partial matches. - # If +false+, the API returns a 404 status code when there are no matches or only partial matches. Server default: true. + # @option arguments [Boolean] :allow_no_match Specifies what to do when the request: contains wildcard expressions and there are no models that match; contains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there are only partial matches. + # If `true`, the API returns an empty array when there are no matches and the subset of results when there are partial matches. + # If `false`, the API returns a 404 status code when there are no matches or only partial matches. Server default: true. # @option arguments [String] :bytes The unit used to display byte values. # @option arguments [String, Array] :h A comma-separated list of column names to display. # @option arguments [String, Array] :s A comma-separated list of column names or aliases used to sort the response. @@ -39,10 +39,10 @@ module Actions # @option arguments [Integer] :size The maximum number of transforms to display. # @option arguments [String] :time Unit used to display time values. # @option arguments [String] :format Specifies the format to return the columnar data in, can be set to - # +text+, +json+, +cbor+, +yaml+, or +smile+. Server default: text. - # @option arguments [Boolean] :help When set to +true+ will output available columns. This option + # `text`, `json`, `cbor`, `yaml`, or `smile`. Server default: text. + # @option arguments [Boolean] :help When set to `true` will output available columns. This option # can't be combined with any other query string option. - # @option arguments [Boolean] :v When set to +true+ will enable verbose output. + # @option arguments [Boolean] :v When set to `true` will enable verbose output. 
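A minimal sketch of the `allow_no_match` behaviour documented above for the ML cat APIs, using `cat.ml_jobs`; the job pattern is made up and the columns are taken from the defaults listed above:

```ruby
require 'elasticsearch'

client = Elasticsearch::Client.new(url: 'http://localhost:9200')

# With allow_no_match (the default), a wildcard that matches nothing returns an
# empty table rather than a 404.
puts client.cat.ml_jobs(
  job_id: 'sales-*',
  allow_no_match: true,
  h: 'id,state,data.processed_records,model.memory_status',
  v: true
)
```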
# @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-trained-models diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cat/nodeattrs.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cat/nodeattrs.rb index b2035b5d0c..f15abfbb2d 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/cat/nodeattrs.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/cat/nodeattrs.rb @@ -28,18 +28,18 @@ module Actions # # @option arguments [String, Array] :h List of columns to appear in the response. Supports simple wildcards. # @option arguments [String, Array] :s List of columns that determine how the table should be sorted. - # Sorting defaults to ascending and can be changed by setting +:asc+ - # or +:desc+ as a suffix to the column name. - # @option arguments [Boolean] :local If +true+, the request computes the list of selected nodes from the - # local cluster state. If +false+ the list of selected nodes are computed + # Sorting defaults to ascending and can be changed by setting `:asc` + # or `:desc` as a suffix to the column name. + # @option arguments [Boolean] :local If `true`, the request computes the list of selected nodes from the + # local cluster state. If `false` the list of selected nodes are computed # from the cluster state of the master node. In both cases the coordinating # node will send requests for further information to each selected node. # @option arguments [Time] :master_timeout Period to wait for a connection to the master node. Server default: 30s. # @option arguments [String] :format Specifies the format to return the columnar data in, can be set to - # +text+, +json+, +cbor+, +yaml+, or +smile+. Server default: text. - # @option arguments [Boolean] :help When set to +true+ will output available columns. This option + # `text`, `json`, `cbor`, `yaml`, or `smile`. Server default: text. + # @option arguments [Boolean] :help When set to `true` will output available columns. This option # can't be combined with any other query string option. - # @option arguments [Boolean] :v When set to +true+ will enable verbose output. + # @option arguments [Boolean] :v When set to `true` will enable verbose output. # @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-nodeattrs diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cat/nodes.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cat/nodes.rb index c76b004be1..b56aeb6524 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/cat/nodes.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/cat/nodes.rb @@ -27,20 +27,20 @@ module Actions # IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. # # @option arguments [String] :bytes The unit used to display byte values. - # @option arguments [Boolean, String] :full_id If +true+, return the full node ID. If +false+, return the shortened node ID. Server default: false. + # @option arguments [Boolean, String] :full_id If `true`, return the full node ID. If `false`, return the shortened node ID. Server default: false. # @option arguments [Boolean] :include_unloaded_segments If true, the response includes information from segments that are not loaded into memory. 
# @option arguments [String, Array] :h A comma-separated list of columns names to display. # It supports simple wildcards. Server default: ip,hp,rp,r,m,n,cpu,l. # @option arguments [String, Array] :s A comma-separated list of column names or aliases that determines the sort order. - # Sorting defaults to ascending and can be changed by setting +:asc+ - # or +:desc+ as a suffix to the column name. + # Sorting defaults to ascending and can be changed by setting `:asc` + # or `:desc` as a suffix to the column name. # @option arguments [Time] :master_timeout The period to wait for a connection to the master node. Server default: 30s. # @option arguments [String] :time The unit used to display time values. # @option arguments [String] :format Specifies the format to return the columnar data in, can be set to - # +text+, +json+, +cbor+, +yaml+, or +smile+. Server default: text. - # @option arguments [Boolean] :help When set to +true+ will output available columns. This option + # `text`, `json`, `cbor`, `yaml`, or `smile`. Server default: text. + # @option arguments [Boolean] :help When set to `true` will output available columns. This option # can't be combined with any other query string option. - # @option arguments [Boolean] :v When set to +true+ will enable verbose output. + # @option arguments [Boolean] :v When set to `true` will enable verbose output. # @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-nodes diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cat/pending_tasks.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cat/pending_tasks.rb index 345f0d1332..e3ef246a1f 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/cat/pending_tasks.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/cat/pending_tasks.rb @@ -28,19 +28,19 @@ module Actions # # @option arguments [String, Array] :h List of columns to appear in the response. Supports simple wildcards. # @option arguments [String, Array] :s List of columns that determine how the table should be sorted. - # Sorting defaults to ascending and can be changed by setting +:asc+ - # or +:desc+ as a suffix to the column name. - # @option arguments [Boolean] :local If +true+, the request computes the list of selected nodes from the - # local cluster state. If +false+ the list of selected nodes are computed + # Sorting defaults to ascending and can be changed by setting `:asc` + # or `:desc` as a suffix to the column name. + # @option arguments [Boolean] :local If `true`, the request computes the list of selected nodes from the + # local cluster state. If `false` the list of selected nodes are computed # from the cluster state of the master node. In both cases the coordinating # node will send requests for further information to each selected node. # @option arguments [Time] :master_timeout Period to wait for a connection to the master node. Server default: 30s. # @option arguments [String] :time Unit used to display time values. # @option arguments [String] :format Specifies the format to return the columnar data in, can be set to - # +text+, +json+, +cbor+, +yaml+, or +smile+. Server default: text. - # @option arguments [Boolean] :help When set to +true+ will output available columns. This option + # `text`, `json`, `cbor`, `yaml`, or `smile`. Server default: text. + # @option arguments [Boolean] :help When set to `true` will output available columns. This option # can't be combined with any other query string option. 
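As an illustrative sketch of the `cat.nodes` parameters documented above (the column list here is an assumption, chosen instead of the default ip,hp,rp,r,m,n,cpu,l):

```ruby
require 'elasticsearch'

client = Elasticsearch::Client.new(url: 'http://localhost:9200')

# Full node IDs plus an explicit column list.
puts client.cat.nodes(full_id: true, h: 'id,ip,name,heap.percent,cpu,master', v: true)
```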
- # @option arguments [Boolean] :v When set to +true+ will enable verbose output. + # @option arguments [Boolean] :v When set to `true` will enable verbose output. # @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-pending-tasks diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cat/plugins.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cat/plugins.rb index db1602a6e9..d92c807b13 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/cat/plugins.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/cat/plugins.rb @@ -28,19 +28,19 @@ module Actions # # @option arguments [String, Array] :h List of columns to appear in the response. Supports simple wildcards. # @option arguments [String, Array] :s List of columns that determine how the table should be sorted. - # Sorting defaults to ascending and can be changed by setting +:asc+ - # or +:desc+ as a suffix to the column name. + # Sorting defaults to ascending and can be changed by setting `:asc` + # or `:desc` as a suffix to the column name. # @option arguments [Boolean] :include_bootstrap Include bootstrap plugins in the response - # @option arguments [Boolean] :local If +true+, the request computes the list of selected nodes from the - # local cluster state. If +false+ the list of selected nodes are computed + # @option arguments [Boolean] :local If `true`, the request computes the list of selected nodes from the + # local cluster state. If `false` the list of selected nodes are computed # from the cluster state of the master node. In both cases the coordinating # node will send requests for further information to each selected node. # @option arguments [Time] :master_timeout Period to wait for a connection to the master node. Server default: 30s. # @option arguments [String] :format Specifies the format to return the columnar data in, can be set to - # +text+, +json+, +cbor+, +yaml+, or +smile+. Server default: text. - # @option arguments [Boolean] :help When set to +true+ will output available columns. This option + # `text`, `json`, `cbor`, `yaml`, or `smile`. Server default: text. + # @option arguments [Boolean] :help When set to `true` will output available columns. This option # can't be combined with any other query string option. - # @option arguments [Boolean] :v When set to +true+ will enable verbose output. + # @option arguments [Boolean] :v When set to `true` will enable verbose output. # @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-plugins diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cat/recovery.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cat/recovery.rb index 93dec9c32c..cb2da5fd03 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/cat/recovery.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/cat/recovery.rb @@ -29,20 +29,20 @@ module Actions # IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index recovery API. # # @option arguments [String, Array] :index A comma-separated list of data streams, indices, and aliases used to limit the request. - # Supports wildcards (+*+). To target all data streams and indices, omit this parameter or use +*+ or +_all+. 
- # @option arguments [Boolean] :active_only If +true+, the response only includes ongoing shard recoveries. + # Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. + # @option arguments [Boolean] :active_only If `true`, the response only includes ongoing shard recoveries. # @option arguments [String] :bytes The unit used to display byte values. - # @option arguments [Boolean] :detailed If +true+, the response includes detailed information about shard recoveries. + # @option arguments [Boolean] :detailed If `true`, the response includes detailed information about shard recoveries. # @option arguments [String, Array] :h List of columns to appear in the response. Supports simple wildcards. # @option arguments [String, Array] :s List of columns that determine how the table should be sorted. - # Sorting defaults to ascending and can be changed by setting +:asc+ - # or +:desc+ as a suffix to the column name. + # Sorting defaults to ascending and can be changed by setting `:asc` + # or `:desc` as a suffix to the column name. # @option arguments [String] :time Unit used to display time values. # @option arguments [String] :format Specifies the format to return the columnar data in, can be set to - # +text+, +json+, +cbor+, +yaml+, or +smile+. Server default: text. - # @option arguments [Boolean] :help When set to +true+ will output available columns. This option + # `text`, `json`, `cbor`, `yaml`, or `smile`. Server default: text. + # @option arguments [Boolean] :help When set to `true` will output available columns. This option # can't be combined with any other query string option. - # @option arguments [Boolean] :v When set to +true+ will enable verbose output. + # @option arguments [Boolean] :v When set to `true` will enable verbose output. # @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-recovery diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cat/repositories.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cat/repositories.rb index b565d40edd..1eb44ea844 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/cat/repositories.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/cat/repositories.rb @@ -28,18 +28,18 @@ module Actions # # @option arguments [String, Array] :h List of columns to appear in the response. Supports simple wildcards. # @option arguments [String, Array] :s List of columns that determine how the table should be sorted. - # Sorting defaults to ascending and can be changed by setting +:asc+ - # or +:desc+ as a suffix to the column name. - # @option arguments [Boolean] :local If +true+, the request computes the list of selected nodes from the - # local cluster state. If +false+ the list of selected nodes are computed + # Sorting defaults to ascending and can be changed by setting `:asc` + # or `:desc` as a suffix to the column name. + # @option arguments [Boolean] :local If `true`, the request computes the list of selected nodes from the + # local cluster state. If `false` the list of selected nodes are computed # from the cluster state of the master node. In both cases the coordinating # node will send requests for further information to each selected node. # @option arguments [Time] :master_timeout Period to wait for a connection to the master node. Server default: 30s. 
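A rough sketch of the `cat.recovery` flags documented above; the index pattern is made up:

```ruby
require 'elasticsearch'

client = Elasticsearch::Client.new(url: 'http://localhost:9200')

# Only recoveries that are still running, with detailed per-recovery information.
puts client.cat.recovery(index: 'logs-*', active_only: true, detailed: true, v: true)
```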
# @option arguments [String] :format Specifies the format to return the columnar data in, can be set to - # +text+, +json+, +cbor+, +yaml+, or +smile+. Server default: text. - # @option arguments [Boolean] :help When set to +true+ will output available columns. This option + # `text`, `json`, `cbor`, `yaml`, or `smile`. Server default: text. + # @option arguments [Boolean] :help When set to `true` will output available columns. This option # can't be combined with any other query string option. - # @option arguments [Boolean] :v When set to +true+ will enable verbose output. + # @option arguments [Boolean] :v When set to `true` will enable verbose output. # @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-repositories diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cat/segments.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cat/segments.rb index cab1160b86..5ad0943df0 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/cat/segments.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/cat/segments.rb @@ -28,23 +28,23 @@ module Actions # IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index segments API. # # @option arguments [String, Array] :index A comma-separated list of data streams, indices, and aliases used to limit the request. - # Supports wildcards (+*+). - # To target all data streams and indices, omit this parameter or use +*+ or +_all+. + # Supports wildcards (`*`). + # To target all data streams and indices, omit this parameter or use `*` or `_all`. # @option arguments [String] :bytes The unit used to display byte values. # @option arguments [String, Array] :h List of columns to appear in the response. Supports simple wildcards. # @option arguments [String, Array] :s List of columns that determine how the table should be sorted. - # Sorting defaults to ascending and can be changed by setting +:asc+ - # or +:desc+ as a suffix to the column name. - # @option arguments [Boolean] :local If +true+, the request computes the list of selected nodes from the - # local cluster state. If +false+ the list of selected nodes are computed + # Sorting defaults to ascending and can be changed by setting `:asc` + # or `:desc` as a suffix to the column name. + # @option arguments [Boolean] :local If `true`, the request computes the list of selected nodes from the + # local cluster state. If `false` the list of selected nodes are computed # from the cluster state of the master node. In both cases the coordinating # node will send requests for further information to each selected node. # @option arguments [Time] :master_timeout Period to wait for a connection to the master node. Server default: 30s. # @option arguments [String] :format Specifies the format to return the columnar data in, can be set to - # +text+, +json+, +cbor+, +yaml+, or +smile+. Server default: text. - # @option arguments [Boolean] :help When set to +true+ will output available columns. This option + # `text`, `json`, `cbor`, `yaml`, or `smile`. Server default: text. + # @option arguments [Boolean] :help When set to `true` will output available columns. This option # can't be combined with any other query string option. - # @option arguments [Boolean] :v When set to +true+ will enable verbose output. 
+ # @option arguments [Boolean] :v When set to `true` will enable verbose output. # @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-segments diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cat/shards.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cat/shards.rb index 04550ec87c..fcf2e59020 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/cat/shards.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/cat/shards.rb @@ -28,20 +28,20 @@ module Actions # IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. # # @option arguments [String, Array] :index A comma-separated list of data streams, indices, and aliases used to limit the request. - # Supports wildcards (+*+). - # To target all data streams and indices, omit this parameter or use +*+ or +_all+. + # Supports wildcards (`*`). + # To target all data streams and indices, omit this parameter or use `*` or `_all`. # @option arguments [String] :bytes The unit used to display byte values. # @option arguments [String, Array] :h List of columns to appear in the response. Supports simple wildcards. # @option arguments [String, Array] :s List of columns that determine how the table should be sorted. - # Sorting defaults to ascending and can be changed by setting +:asc+ - # or +:desc+ as a suffix to the column name. + # Sorting defaults to ascending and can be changed by setting `:asc` + # or `:desc` as a suffix to the column name. # @option arguments [Time] :master_timeout Period to wait for a connection to the master node. Server default: 30s. # @option arguments [String] :time Unit used to display time values. # @option arguments [String] :format Specifies the format to return the columnar data in, can be set to - # +text+, +json+, +cbor+, +yaml+, or +smile+. Server default: text. - # @option arguments [Boolean] :help When set to +true+ will output available columns. This option + # `text`, `json`, `cbor`, `yaml`, or `smile`. Server default: text. + # @option arguments [Boolean] :help When set to `true` will output available columns. This option # can't be combined with any other query string option. - # @option arguments [Boolean] :v When set to +true+ will enable verbose output. + # @option arguments [Boolean] :v When set to `true` will enable verbose output. # @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-shards diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cat/snapshots.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cat/snapshots.rb index 2d39833e93..e227074af6 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/cat/snapshots.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/cat/snapshots.rb @@ -29,20 +29,20 @@ module Actions # # @option arguments [String, Array] :repository A comma-separated list of snapshot repositories used to limit the request. # Accepts wildcard expressions. - # +_all+ returns all repositories. + # `_all` returns all repositories. # If any repository fails during the request, Elasticsearch returns an error. - # @option arguments [Boolean] :ignore_unavailable If +true+, the response does not include information from unavailable snapshots. + # @option arguments [Boolean] :ignore_unavailable If `true`, the response does not include information from unavailable snapshots. 
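For illustration, the `cat.shards` parameters documented above might be used like this; the index pattern and sort column are assumptions:

```ruby
require 'elasticsearch'

client = Elasticsearch::Client.new(url: 'http://localhost:9200')

# Shards for a made-up index pattern, largest on-disk size first.
puts client.cat.shards(index: 'logs-*', h: 'index,shard,prirep,state,store,node', s: 'store:desc', v: true)
```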
# @option arguments [String, Array] :h List of columns to appear in the response. Supports simple wildcards. # @option arguments [String, Array] :s List of columns that determine how the table should be sorted. - # Sorting defaults to ascending and can be changed by setting +:asc+ - # or +:desc+ as a suffix to the column name. + # Sorting defaults to ascending and can be changed by setting `:asc` + # or `:desc` as a suffix to the column name. # @option arguments [Time] :master_timeout Period to wait for a connection to the master node. Server default: 30s. # @option arguments [String] :time Unit used to display time values. # @option arguments [String] :format Specifies the format to return the columnar data in, can be set to - # +text+, +json+, +cbor+, +yaml+, or +smile+. Server default: text. - # @option arguments [Boolean] :help When set to +true+ will output available columns. This option + # `text`, `json`, `cbor`, `yaml`, or `smile`. Server default: text. + # @option arguments [Boolean] :help When set to `true` will output available columns. This option # can't be combined with any other query string option. - # @option arguments [Boolean] :v When set to +true+ will enable verbose output. + # @option arguments [Boolean] :v When set to `true` will enable verbose output. # @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-snapshots diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cat/tasks.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cat/tasks.rb index 050890bc76..3cb1c5d63c 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/cat/tasks.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/cat/tasks.rb @@ -31,22 +31,22 @@ module Actions # support SLA of official GA features. # # @option arguments [Array] :actions The task action names, which are used to limit the response. - # @option arguments [Boolean] :detailed If +true+, the response includes detailed information about shard recoveries. + # @option arguments [Boolean] :detailed If `true`, the response includes detailed information about shard recoveries. # @option arguments [Array] :nodes Unique node identifiers, which are used to limit the response. # @option arguments [String] :parent_task_id The parent task identifier, which is used to limit the response. # @option arguments [String, Array] :h List of columns to appear in the response. Supports simple wildcards. # @option arguments [String, Array] :s List of columns that determine how the table should be sorted. - # Sorting defaults to ascending and can be changed by setting +:asc+ - # or +:desc+ as a suffix to the column name. + # Sorting defaults to ascending and can be changed by setting `:asc` + # or `:desc` as a suffix to the column name. # @option arguments [String] :time Unit used to display time values. # @option arguments [Time] :timeout Period to wait for a response. # If no response is received before the timeout expires, the request fails and returns an error. Server default: 30s. - # @option arguments [Boolean] :wait_for_completion If +true+, the request blocks until the task has completed. + # @option arguments [Boolean] :wait_for_completion If `true`, the request blocks until the task has completed. # @option arguments [String] :format Specifies the format to return the columnar data in, can be set to - # +text+, +json+, +cbor+, +yaml+, or +smile+. Server default: text. 
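A hedged sketch of the `cat.snapshots` parameters documented above; the repository name is invented, and `ignore_unavailable` skips unavailable snapshots instead of failing the whole call:

```ruby
require 'elasticsearch'

client = Elasticsearch::Client.new(url: 'http://localhost:9200')

puts client.cat.snapshots(
  repository: 'my_backups',
  ignore_unavailable: true,
  h: 'id,status,start_time,duration,total_shards',
  v: true
)
```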
- # @option arguments [Boolean] :help When set to +true+ will output available columns. This option + # `text`, `json`, `cbor`, `yaml`, or `smile`. Server default: text. + # @option arguments [Boolean] :help When set to `true` will output available columns. This option # can't be combined with any other query string option. - # @option arguments [Boolean] :v When set to +true+ will enable verbose output. + # @option arguments [Boolean] :v When set to `true` will enable verbose output. # @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-tasks diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cat/templates.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cat/templates.rb index 5bdc457bcd..8b3851246c 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/cat/templates.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/cat/templates.rb @@ -31,18 +31,18 @@ module Actions # Accepts wildcard expressions. If omitted, all templates are returned. # @option arguments [String, Array] :h List of columns to appear in the response. Supports simple wildcards. # @option arguments [String, Array] :s List of columns that determine how the table should be sorted. - # Sorting defaults to ascending and can be changed by setting +:asc+ - # or +:desc+ as a suffix to the column name. - # @option arguments [Boolean] :local If +true+, the request computes the list of selected nodes from the - # local cluster state. If +false+ the list of selected nodes are computed + # Sorting defaults to ascending and can be changed by setting `:asc` + # or `:desc` as a suffix to the column name. + # @option arguments [Boolean] :local If `true`, the request computes the list of selected nodes from the + # local cluster state. If `false` the list of selected nodes are computed # from the cluster state of the master node. In both cases the coordinating # node will send requests for further information to each selected node. # @option arguments [Time] :master_timeout Period to wait for a connection to the master node. Server default: 30s. # @option arguments [String] :format Specifies the format to return the columnar data in, can be set to - # +text+, +json+, +cbor+, +yaml+, or +smile+. Server default: text. - # @option arguments [Boolean] :help When set to +true+ will output available columns. This option + # `text`, `json`, `cbor`, `yaml`, or `smile`. Server default: text. + # @option arguments [Boolean] :help When set to `true` will output available columns. This option # can't be combined with any other query string option. - # @option arguments [Boolean] :v When set to +true+ will enable verbose output. + # @option arguments [Boolean] :v When set to `true` will enable verbose output. # @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-templates diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cat/thread_pool.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cat/thread_pool.rb index be0cd62fdc..783db49182 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/cat/thread_pool.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/cat/thread_pool.rb @@ -31,19 +31,19 @@ module Actions # Accepts wildcard expressions. # @option arguments [String, Array] :h List of columns to appear in the response. Supports simple wildcards. 
# @option arguments [String, Array] :s List of columns that determine how the table should be sorted. - # Sorting defaults to ascending and can be changed by setting +:asc+ - # or +:desc+ as a suffix to the column name. + # Sorting defaults to ascending and can be changed by setting `:asc` + # or `:desc` as a suffix to the column name. # @option arguments [String] :time The unit used to display time values. - # @option arguments [Boolean] :local If +true+, the request computes the list of selected nodes from the - # local cluster state. If +false+ the list of selected nodes are computed + # @option arguments [Boolean] :local If `true`, the request computes the list of selected nodes from the + # local cluster state. If `false` the list of selected nodes are computed # from the cluster state of the master node. In both cases the coordinating # node will send requests for further information to each selected node. # @option arguments [Time] :master_timeout Period to wait for a connection to the master node. Server default: 30s. # @option arguments [String] :format Specifies the format to return the columnar data in, can be set to - # +text+, +json+, +cbor+, +yaml+, or +smile+. Server default: text. - # @option arguments [Boolean] :help When set to +true+ will output available columns. This option + # `text`, `json`, `cbor`, `yaml`, or `smile`. Server default: text. + # @option arguments [Boolean] :help When set to `true` will output available columns. This option # can't be combined with any other query string option. - # @option arguments [Boolean] :v When set to +true+ will enable verbose output. + # @option arguments [Boolean] :v When set to `true` will enable verbose output. # @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-thread-pool diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cat/transforms.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cat/transforms.rb index 9390a25881..aa5b4d39a5 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/cat/transforms.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/cat/transforms.rb @@ -30,19 +30,19 @@ module Actions # # @option arguments [String] :transform_id A transform identifier or a wildcard expression. # If you do not specify one of these options, the API returns information for all transforms. - # @option arguments [Boolean] :allow_no_match Specifies what to do when the request: contains wildcard expressions and there are no transforms that match; contains the +_all+ string or no identifiers and there are no matches; contains wildcard expressions and there are only partial matches. - # If +true+, it returns an empty transforms array when there are no matches and the subset of results when there are partial matches. - # If +false+, the request returns a 404 status code when there are no matches or only partial matches. Server default: true. + # @option arguments [Boolean] :allow_no_match Specifies what to do when the request: contains wildcard expressions and there are no transforms that match; contains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there are only partial matches. + # If `true`, it returns an empty transforms array when there are no matches and the subset of results when there are partial matches. + # If `false`, the request returns a 404 status code when there are no matches or only partial matches. Server default: true. 
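The thread pool and transform listings work the same way; a short sketch for the thread pool columns, reusing the `client` from the earlier example (the column names shown are common ones and may differ by version):

    # Show per-node thread pool activity, queue depth, and rejections.
    puts client.cat.thread_pool(
      h: 'node_name,name,active,queue,rejected',
      s: 'node_name',
      v: true
    )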
# @option arguments [Integer] :from Skips the specified number of transforms. Server default: 0. # @option arguments [String, Array] :h Comma-separated list of column names to display. Server default: changes_last_detection_time,checkpoint,checkpoint_progress,documents_processed,id,last_search_time,state. # @option arguments [String, Array] :s Comma-separated list of column names or column aliases used to sort the response. # @option arguments [String] :time The unit used to display time values. # @option arguments [Integer] :size The maximum number of transforms to obtain. Server default: 100. # @option arguments [String] :format Specifies the format to return the columnar data in, can be set to - # +text+, +json+, +cbor+, +yaml+, or +smile+. Server default: text. - # @option arguments [Boolean] :help When set to +true+ will output available columns. This option + # `text`, `json`, `cbor`, `yaml`, or `smile`. Server default: text. + # @option arguments [Boolean] :help When set to `true` will output available columns. This option # can't be combined with any other query string option. - # @option arguments [Boolean] :v When set to +true+ will enable verbose output. + # @option arguments [Boolean] :v When set to `true` will enable verbose output. # @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-transforms diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/clear_scroll.rb b/elasticsearch-api/lib/elasticsearch/api/actions/clear_scroll.rb index 520f28001a..5bf6be854e 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/clear_scroll.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/clear_scroll.rb @@ -25,7 +25,7 @@ module Actions # Clear the search context and results for a scrolling search. # # @option arguments [String, Array] :scroll_id A comma-separated list of scroll IDs to clear. - # To clear all scroll IDs, use +_all+. + # To clear all scroll IDs, use `_all`. # IMPORTANT: Scroll IDs can be long. It is recommended to specify scroll IDs in the request body parameter. # @option arguments [Hash] :headers Custom HTTP headers # @option arguments [Hash] :body request body diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/close_point_in_time.rb b/elasticsearch-api/lib/elasticsearch/api/actions/close_point_in_time.rb index 537ed0eff6..5c120c2fe9 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/close_point_in_time.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/close_point_in_time.rb @@ -23,8 +23,8 @@ module API module Actions # Close a point in time. # A point in time must be opened explicitly before being used in search requests. - # The +keep_alive+ parameter tells Elasticsearch how long it should persist. - # A point in time is automatically closed when the +keep_alive+ period has elapsed. + # The `keep_alive` parameter tells Elasticsearch how long it should persist. + # A point in time is automatically closed when the `keep_alive` period has elapsed. # However, keeping points in time has a cost; close them as soon as they are no longer required for search requests. 
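Because the scroll and point-in-time docs above stress releasing contexts promptly, a hedged sketch of the full open, search, close cycle may help; the index name is a placeholder and `client` is assumed to be configured:

    # Open a point in time, search against it, then close it as soon as
    # it is no longer needed, since keeping it alive has a cost.
    pit = client.open_point_in_time(index: 'my-index', keep_alive: '1m')

    client.search(body: {
      query: { match_all: {} },
      pit: { id: pit['id'], keep_alive: '1m' }
    })

    client.close_point_in_time(body: { id: pit['id'] })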
# # @option arguments [Hash] :headers Custom HTTP headers diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cluster/get_component_template.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cluster/get_component_template.rb index 3f57430a52..51378a2351 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/cluster/get_component_template.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/cluster/get_component_template.rb @@ -26,11 +26,11 @@ module Actions # Get information about component templates. # # @option arguments [String] :name Comma-separated list of component template names used to limit the request. - # Wildcard (+*+) expressions are supported. - # @option arguments [Boolean] :flat_settings If +true+, returns settings in flat format. + # Wildcard (`*`) expressions are supported. + # @option arguments [Boolean] :flat_settings If `true`, returns settings in flat format. # @option arguments [Boolean] :include_defaults Return all default configurations for the component template (default: false) - # @option arguments [Boolean] :local If +true+, the request retrieves information from the local node only. - # If +false+, information is retrieved from the master node. + # @option arguments [Boolean] :local If `true`, the request retrieves information from the local node only. + # If `false`, information is retrieved from the master node. # @option arguments [Time] :master_timeout Period to wait for a connection to the master node. # If no response is received before the timeout expires, the request fails and returns an error. Server default: 30s. # @option arguments [Hash] :headers Custom HTTP headers diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cluster/get_settings.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cluster/get_settings.rb index 6f636d813d..bb722504f4 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/cluster/get_settings.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/cluster/get_settings.rb @@ -25,8 +25,8 @@ module Actions # Get cluster-wide settings. # By default, it returns only settings that have been explicitly defined. # - # @option arguments [Boolean] :flat_settings If +true+, returns settings in flat format. - # @option arguments [Boolean] :include_defaults If +true+, returns default cluster settings from the local node. + # @option arguments [Boolean] :flat_settings If `true`, returns settings in flat format. + # @option arguments [Boolean] :include_defaults If `true`, returns default cluster settings from the local node. # @option arguments [Time] :master_timeout Period to wait for a connection to the master node. # If no response is received before the timeout expires, the request fails and returns an error. Server default: 30s. # @option arguments [Time] :timeout Period to wait for a response. diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cluster/health.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cluster/health.rb index bf5a38b0ff..fb3becd60d 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/cluster/health.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/cluster/health.rb @@ -31,7 +31,7 @@ module Actions # One of the main benefits of the API is the ability to wait until the cluster reaches a certain high watermark health level. # The cluster status is controlled by the worst index status. # - # @option arguments [String, Array] :index Comma-separated list of data streams, indices, and index aliases used to limit the request. 
Wildcard expressions (+*+) are supported. To target all data streams and indices in a cluster, omit this parameter or use _all or +*+. + # @option arguments [String, Array] :index Comma-separated list of data streams, indices, and index aliases used to limit the request. Wildcard expressions (`*`) are supported. To target all data streams and indices in a cluster, omit this parameter or use _all or `*`. # @option arguments [String, Array] :expand_wildcards Whether to expand wildcard expression to concrete indices that are open, closed or both. # @option arguments [String] :level Can be one of cluster, indices or shards. Controls the details level of the health information returned. Server default: cluster. # @option arguments [Boolean] :local If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node. diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cluster/pending_tasks.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cluster/pending_tasks.rb index f0430b73c3..29abf88497 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/cluster/pending_tasks.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/cluster/pending_tasks.rb @@ -28,8 +28,8 @@ module Actions # These are distinct from the tasks reported by the task management API which include periodic tasks and tasks initiated by the user, such as node stats, search queries, or create index requests. # However, if a user-initiated task such as a create index command causes a cluster state update, the activity of this task might be reported by both task api and pending cluster tasks API. # - # @option arguments [Boolean] :local If +true+, the request retrieves information from the local node only. - # If +false+, information is retrieved from the master node. + # @option arguments [Boolean] :local If `true`, the request retrieves information from the local node only. + # If `false`, information is retrieved from the master node. # @option arguments [Time] :master_timeout Period to wait for a connection to the master node. # If no response is received before the timeout expires, the request fails and returns an error. Server default: 30s. # @option arguments [Hash] :headers Custom HTTP headers diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cluster/post_voting_config_exclusions.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cluster/post_voting_config_exclusions.rb index 35bd36c80c..11c4e912ea 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/cluster/post_voting_config_exclusions.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/cluster/post_voting_config_exclusions.rb @@ -29,11 +29,11 @@ module Actions # The API adds an entry for each specified node to the cluster’s voting configuration exclusions list. # It then waits until the cluster has reconfigured its voting configuration to exclude the specified nodes. # Clusters should have no voting configuration exclusions in normal operation. - # Once the excluded nodes have stopped, clear the voting configuration exclusions with +DELETE /_cluster/voting_config_exclusions+. + # Once the excluded nodes have stopped, clear the voting configuration exclusions with `DELETE /_cluster/voting_config_exclusions`. # This API waits for the nodes to be fully removed from the cluster before it returns. 
- # If your cluster has voting configuration exclusions for nodes that you no longer intend to remove, use +DELETE /_cluster/voting_config_exclusions?wait_for_removal=false+ to clear the voting configuration exclusions without waiting for the nodes to leave the cluster. - # A response to +POST /_cluster/voting_config_exclusions+ with an HTTP status code of 200 OK guarantees that the node has been removed from the voting configuration and will not be reinstated until the voting configuration exclusions are cleared by calling +DELETE /_cluster/voting_config_exclusions+. - # If the call to +POST /_cluster/voting_config_exclusions+ fails or returns a response with an HTTP status code other than 200 OK then the node may not have been removed from the voting configuration. + # If your cluster has voting configuration exclusions for nodes that you no longer intend to remove, use `DELETE /_cluster/voting_config_exclusions?wait_for_removal=false` to clear the voting configuration exclusions without waiting for the nodes to leave the cluster. + # A response to `POST /_cluster/voting_config_exclusions` with an HTTP status code of 200 OK guarantees that the node has been removed from the voting configuration and will not be reinstated until the voting configuration exclusions are cleared by calling `DELETE /_cluster/voting_config_exclusions`. + # If the call to `POST /_cluster/voting_config_exclusions` fails or returns a response with an HTTP status code other than 200 OK then the node may not have been removed from the voting configuration. # In that case, you may safely retry the call. # NOTE: Voting exclusions are required only when you remove at least half of the master-eligible nodes from a cluster in a short time period. # They are not required when removing master-ineligible nodes or when removing fewer than half of the master-eligible nodes. diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cluster/put_component_template.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cluster/put_component_template.rb index 498d36572c..4fc79b3c79 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/cluster/put_component_template.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/cluster/put_component_template.rb @@ -25,24 +25,24 @@ module Actions # Create or update a component template. # Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. # An index template can be composed of multiple component templates. - # To use a component template, specify it in an index template’s +composed_of+ list. + # To use a component template, specify it in an index template’s `composed_of` list. # Component templates are only applied to new data streams and indices as part of a matching index template. # Settings and mappings specified directly in the index template or the create index request override any settings or mappings specified in a component template. # Component templates are only used during index creation. # For data streams, this includes data stream creation and the creation of a stream’s backing indices. # Changes to component templates do not affect existing indices, including a stream’s backing indices. - # You can use C-style +/* *\/+ block comments in component templates. + # You can use C-style `/* *\/` block comments in component templates. # You can include comments anywhere in the request body except before the opening curly bracket. 
# **Applying component templates** # You cannot directly apply a component template to a data stream or index. - # To be applied, a component template must be included in an index template's +composed_of+ list. + # To be applied, a component template must be included in an index template's `composed_of` list. # # @option arguments [String] :name Name of the component template to create. - # Elasticsearch includes the following built-in component templates: +logs-mappings+; +logs-settings+; +metrics-mappings+; +metrics-settings+;+synthetics-mapping+; +synthetics-settings+. + # Elasticsearch includes the following built-in component templates: `logs-mappings`; `logs-settings`; `metrics-mappings`; `metrics-settings`;`synthetics-mapping`; `synthetics-settings`. # Elastic Agent uses these templates to configure backing indices for its data streams. - # If you use Elastic Agent and want to overwrite one of these templates, set the +version+ for your replacement template higher than the current version. - # If you don’t use Elastic Agent and want to disable all built-in component and index templates, set +stack.templates.enabled+ to +false+ using the cluster update settings API. (*Required*) - # @option arguments [Boolean] :create If +true+, this request cannot replace or update existing component templates. + # If you use Elastic Agent and want to overwrite one of these templates, set the `version` for your replacement template higher than the current version. + # If you don’t use Elastic Agent and want to disable all built-in component and index templates, set `stack.templates.enabled` to `false` using the cluster update settings API. (*Required*) + # @option arguments [Boolean] :create If `true`, this request cannot replace or update existing component templates. # @option arguments [Time] :master_timeout Period to wait for a connection to the master node. # If no response is received before the timeout expires, the request fails and returns an error. Server default: 30s. # @option arguments [Hash] :headers Custom HTTP headers diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cluster/put_settings.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cluster/put_settings.rb index 655fa2cc63..231844c826 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/cluster/put_settings.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/cluster/put_settings.rb @@ -24,15 +24,15 @@ module Cluster module Actions # Update the cluster settings. # Configure and update dynamic settings on a running cluster. - # You can also configure dynamic settings locally on an unstarted or shut down node in +elasticsearch.yml+. + # You can also configure dynamic settings locally on an unstarted or shut down node in `elasticsearch.yml`. # Updates made with this API can be persistent, which apply across cluster restarts, or transient, which reset after a cluster restart. # You can also reset transient or persistent settings by assigning them a null value. - # If you configure the same setting using multiple methods, Elasticsearch applies the settings in following order of precedence: 1) Transient setting; 2) Persistent setting; 3) +elasticsearch.yml+ setting; 4) Default setting value. - # For example, you can apply a transient setting to override a persistent setting or +elasticsearch.yml+ setting. - # However, a change to an +elasticsearch.yml+ setting will not override a defined transient or persistent setting. 
+ # If you configure the same setting using multiple methods, Elasticsearch applies the settings in following order of precedence: 1) Transient setting; 2) Persistent setting; 3) `elasticsearch.yml` setting; 4) Default setting value. + # For example, you can apply a transient setting to override a persistent setting or `elasticsearch.yml` setting. + # However, a change to an `elasticsearch.yml` setting will not override a defined transient or persistent setting. # TIP: In Elastic Cloud, use the user settings feature to configure all cluster settings. This method automatically rejects unsafe settings that could break your cluster. # If you run Elasticsearch on your own hardware, use this API to configure dynamic cluster settings. - # Only use +elasticsearch.yml+ for static cluster settings and node settings. + # Only use `elasticsearch.yml` for static cluster settings and node settings. # The API doesn’t require a restart and ensures a setting’s value is the same on all nodes. # WARNING: Transient cluster settings are no longer recommended. Use persistent cluster settings instead. # If a cluster becomes unstable, transient settings can clear unexpectedly, resulting in a potentially undesired cluster configuration. diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cluster/reroute.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cluster/reroute.rb index dd24a9608d..55f98a07f8 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/cluster/reroute.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/cluster/reroute.rb @@ -25,13 +25,13 @@ module Actions # Reroute the cluster. # Manually change the allocation of individual shards in the cluster. # For example, a shard can be moved from one node to another explicitly, an allocation can be canceled, and an unassigned shard can be explicitly allocated to a specific node. - # It is important to note that after processing any reroute commands Elasticsearch will perform rebalancing as normal (respecting the values of settings such as +cluster.routing.rebalance.enable+) in order to remain in a balanced state. + # It is important to note that after processing any reroute commands Elasticsearch will perform rebalancing as normal (respecting the values of settings such as `cluster.routing.rebalance.enable`) in order to remain in a balanced state. # For example, if the requested allocation includes moving a shard from node1 to node2 then this may cause a shard to be moved from node2 back to node1 to even things out. - # The cluster can be set to disable allocations using the +cluster.routing.allocation.enable+ setting. + # The cluster can be set to disable allocations using the `cluster.routing.allocation.enable` setting. # If allocations are disabled then the only allocations that will be performed are explicit ones given using the reroute command, and consequent allocations due to rebalancing. - # The cluster will attempt to allocate a shard a maximum of +index.allocation.max_retries+ times in a row (defaults to +5+), before giving up and leaving the shard unallocated. + # The cluster will attempt to allocate a shard a maximum of `index.allocation.max_retries` times in a row (defaults to `5`), before giving up and leaving the shard unallocated. # This scenario can be caused by structural problems such as having an analyzer which refers to a stopwords file which doesn’t exist on all nodes. 
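To make the settings precedence and allocation discussion above concrete, a small sketch using persistent settings (preferred over transient, per the warning); the setting value shown is only an example:

    # Restrict shard allocation to primaries before maintenance ...
    client.cluster.put_settings(body: {
      persistent: { 'cluster.routing.allocation.enable' => 'primaries' }
    })

    # ... and reset it to the default afterwards by assigning nil (null).
    client.cluster.put_settings(body: {
      persistent: { 'cluster.routing.allocation.enable' => nil }
    })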
- # Once the problem has been corrected, allocation can be manually retried by calling the reroute API with the +?retry_failed+ URI query parameter, which will attempt a single retry round for these shards. + # Once the problem has been corrected, allocation can be manually retried by calling the reroute API with the `?retry_failed` URI query parameter, which will attempt a single retry round for these shards. # # @option arguments [Boolean] :dry_run If true, then the request simulates the operation. # It will calculate the result of applying the commands to the current cluster state and return the resulting cluster state after the commands (and rebalancing) have been applied; it will not actually perform the requested changes. diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cluster/state.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cluster/state.rb index e242fde3e0..4a1bcbe917 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/cluster/state.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/cluster/state.rb @@ -29,7 +29,7 @@ module Actions # This API lets you retrieve a representation of this internal state for debugging or diagnostic purposes. # You may need to consult the Elasticsearch source code to determine the precise meaning of the response. # By default the API will route requests to the elected master node since this node is the authoritative source of cluster states. - # You can also retrieve the cluster state held on the node handling the API request by adding the +?local=true+ query parameter. + # You can also retrieve the cluster state held on the node handling the API request by adding the `?local=true` query parameter. # Elasticsearch may need to expend significant effort to compute a response to this API in larger clusters, and the response may comprise a very large quantity of data. # If you use this API repeatedly, your cluster may become unstable. # WARNING: The response is a representation of an internal data structure. @@ -38,8 +38,8 @@ module Actions # Instead, obtain the information you require using other more stable cluster APIs. # # @option arguments [String, Array] :metric Limit the information returned to the specified metrics - # @option arguments [String, Array] :index A comma-separated list of index names; use +_all+ or empty string to perform the operation on all indices - # @option arguments [Boolean] :allow_no_indices Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes +_all+ string or when no indices have been specified) Server default: true. + # @option arguments [String, Array] :index A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices + # @option arguments [Boolean] :allow_no_indices Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) Server default: true. # @option arguments [String, Array] :expand_wildcards Whether to expand wildcard expression to concrete indices that are open, closed or both. 
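Given the warnings above about the cluster state response, a cautious sketch that limits the call to a couple of metrics and answers from the local node; the metric and index values are illustrative:

    # Fetch only metadata and routing_table for one index, served by the
    # node handling the request rather than the elected master.
    state = client.cluster.state(
      metric: 'metadata,routing_table',
      index: 'my-index',
      local: true
    )
    puts state['cluster_name']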
# @option arguments [Boolean] :flat_settings Return settings in flat format (default: false) # @option arguments [Boolean] :ignore_unavailable Whether specified concrete indices should be ignored when unavailable (missing or closed) diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cluster/stats.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cluster/stats.rb index fa07c366fa..bfcac76e44 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/cluster/stats.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/cluster/stats.rb @@ -29,7 +29,7 @@ module Actions # @option arguments [Boolean] :include_remotes Include remote cluster data into the response # @option arguments [Time] :timeout Period to wait for each node to respond. # If a node does not respond before its timeout expires, the response does not include its stats. - # However, timed out nodes are included in the response’s +_nodes.failed+ property. Defaults to no timeout. + # However, timed out nodes are included in the response’s `_nodes.failed` property. Defaults to no timeout. # @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-stats diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/connector/check_in.rb b/elasticsearch-api/lib/elasticsearch/api/actions/connector/check_in.rb index 6211600bbe..ac28260249 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/connector/check_in.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/connector/check_in.rb @@ -23,7 +23,7 @@ module API module Connector module Actions # Check in a connector. - # Update the +last_seen+ field in the connector and set it to the current timestamp. + # Update the `last_seen` field in the connector and set it to the current timestamp. # This functionality is Experimental and may be changed or removed # completely in a future release. Elastic will take a best effort approach # to fix any issues, but experimental features are not subject to the diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/connector/sync_job_cancel.rb b/elasticsearch-api/lib/elasticsearch/api/actions/connector/sync_job_cancel.rb index 78b8b46990..f59d314d85 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/connector/sync_job_cancel.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/connector/sync_job_cancel.rb @@ -23,7 +23,7 @@ module API module Connector module Actions # Cancel a connector sync job. - # Cancel a connector sync job, which sets the status to cancelling and updates +cancellation_requested_at+ to the current time. + # Cancel a connector sync job, which sets the status to cancelling and updates `cancellation_requested_at` to the current time. # The connector service is then responsible for setting the status of connector sync jobs to cancelled. # This functionality is in Beta and is subject to change. The design and # code is less mature than official GA features and is being provided diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/connector/sync_job_check_in.rb b/elasticsearch-api/lib/elasticsearch/api/actions/connector/sync_job_check_in.rb index 8d1fa84500..e27894ea20 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/connector/sync_job_check_in.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/connector/sync_job_check_in.rb @@ -23,7 +23,7 @@ module API module Connector module Actions # Check in a connector sync job. 
- # Check in a connector sync job and set the +last_seen+ field to the current time before updating it in the internal index. + # Check in a connector sync job and set the `last_seen` field to the current time before updating it in the internal index. # To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. # This service runs automatically on Elastic Cloud for Elastic managed connectors. # This functionality is Experimental and may be changed or removed diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/connector/sync_job_claim.rb b/elasticsearch-api/lib/elasticsearch/api/actions/connector/sync_job_claim.rb index bfd4ec791b..4da848784d 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/connector/sync_job_claim.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/connector/sync_job_claim.rb @@ -23,8 +23,8 @@ module API module Connector module Actions # Claim a connector sync job. - # This action updates the job status to +in_progress+ and sets the +last_seen+ and +started_at+ timestamps to the current time. - # Additionally, it can set the +sync_cursor+ property for the sync job. + # This action updates the job status to `in_progress` and sets the `last_seen` and `started_at` timestamps to the current time. + # Additionally, it can set the `sync_cursor` property for the sync job. # This API is not intended for direct connector management by users. # It supports the implementation of services that utilize the connector protocol to communicate with Elasticsearch. # To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/connector/sync_job_error.rb b/elasticsearch-api/lib/elasticsearch/api/actions/connector/sync_job_error.rb index f1ac042d4c..2afc4e0dfa 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/connector/sync_job_error.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/connector/sync_job_error.rb @@ -23,7 +23,7 @@ module API module Connector module Actions # Set a connector sync job error. - # Set the +error+ field for a connector sync job and set its +status+ to +error+. + # Set the `error` field for a connector sync job and set its `status` to `error`. # To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. # This service runs automatically on Elastic Cloud for Elastic managed connectors. # This functionality is Experimental and may be changed or removed diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/connector/sync_job_update_stats.rb b/elasticsearch-api/lib/elasticsearch/api/actions/connector/sync_job_update_stats.rb index ede9d35e74..0860c1d5eb 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/connector/sync_job_update_stats.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/connector/sync_job_update_stats.rb @@ -23,8 +23,8 @@ module API module Connector module Actions # Set the connector sync job stats. - # Stats include: +deleted_document_count+, +indexed_document_count+, +indexed_document_volume+, and +total_document_count+. - # You can also update +last_seen+. + # Stats include: `deleted_document_count`, `indexed_document_count`, `indexed_document_volume`, and `total_document_count`. + # You can also update `last_seen`. # This API is mainly used by the connector service for updating sync job information. 
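A rough sketch of reporting sync job stats through this endpoint; the job id is a placeholder, and the parameter and body field names are assumed from the REST path and the stats listed above rather than taken from this patch:

    # Report progress for a sync job run by a self-managed connector.
    client.connector.sync_job_update_stats(
      connector_sync_job_id: 'my-sync-job-id',
      body: {
        deleted_document_count: 0,
        indexed_document_count: 120,
        indexed_document_volume: 52_428
      }
    )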
# To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. # This service runs automatically on Elastic Cloud for Elastic managed connectors. diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/connector/update_api_key_id.rb b/elasticsearch-api/lib/elasticsearch/api/actions/connector/update_api_key_id.rb index 4f02552408..a600e8275c 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/connector/update_api_key_id.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/connector/update_api_key_id.rb @@ -23,7 +23,7 @@ module API module Connector module Actions # Update the connector API key ID. - # Update the +api_key_id+ and +api_key_secret_id+ fields of a connector. + # Update the `api_key_id` and `api_key_secret_id` fields of a connector. # You can specify the ID of the API key used for authorization and the ID of the connector secret where the API key is stored. # The connector secret ID is required only for Elastic managed (native) connectors. # Self-managed connectors (connector clients) do not use this field. diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/connector/update_index_name.rb b/elasticsearch-api/lib/elasticsearch/api/actions/connector/update_index_name.rb index 0b9d9df6b5..9618eb21e8 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/connector/update_index_name.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/connector/update_index_name.rb @@ -23,7 +23,7 @@ module API module Connector module Actions # Update the connector index name. - # Update the +index_name+ field of a connector, specifying the index where the data ingested by the connector is stored. + # Update the `index_name` field of a connector, specifying the index where the data ingested by the connector is stored. # This functionality is in Beta and is subject to change. The design and # code is less mature than official GA features and is being provided # as-is with no warranties. Beta features are not subject to the support diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/count.rb b/elasticsearch-api/lib/elasticsearch/api/actions/count.rb index 5600a3b1d3..1721e9c259 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/count.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/count.rb @@ -24,34 +24,34 @@ module Actions # Count search results. # Get the number of documents matching a query. # The query can be provided either by using a simple query string as a parameter, or by defining Query DSL within the request body. - # The query is optional. When no query is provided, the API uses +match_all+ to count all the documents. + # The query is optional. When no query is provided, the API uses `match_all` to count all the documents. # The count API supports multi-target syntax. You can run a single count API search across multiple data streams and indices. # The operation is broadcast across all shards. # For each shard ID group, a replica is chosen and the search is run against it. # This means that replicas increase the scalability of the count. # # @option arguments [String, Array] :index A comma-separated list of data streams, indices, and aliases to search. - # It supports wildcards (+*+). - # To search all data streams and indices, omit this parameter or use +*+ or +_all+. - # @option arguments [Boolean] :allow_no_indices If +false+, the request returns an error if any wildcard expression, index alias, or +_all+ value targets only missing or closed indices. 
+ # It supports wildcards (`*`). + # To search all data streams and indices, omit this parameter or use `*` or `_all`. + # @option arguments [Boolean] :allow_no_indices If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. # This behavior applies even if the request targets other open indices. - # For example, a request targeting +foo*,bar*+ returns an error if an index starts with +foo+ but no index starts with +bar+. Server default: true. + # For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. Server default: true. # @option arguments [String] :analyzer The analyzer to use for the query string. - # This parameter can be used only when the +q+ query string parameter is specified. - # @option arguments [Boolean] :analyze_wildcard If +true+, wildcard and prefix queries are analyzed. - # This parameter can be used only when the +q+ query string parameter is specified. - # @option arguments [String] :default_operator The default operator for query string query: +AND+ or +OR+. - # This parameter can be used only when the +q+ query string parameter is specified. Server default: OR. + # This parameter can be used only when the `q` query string parameter is specified. + # @option arguments [Boolean] :analyze_wildcard If `true`, wildcard and prefix queries are analyzed. + # This parameter can be used only when the `q` query string parameter is specified. + # @option arguments [String] :default_operator The default operator for query string query: `AND` or `OR`. + # This parameter can be used only when the `q` query string parameter is specified. Server default: OR. # @option arguments [String] :df The field to use as a default when no field prefix is given in the query string. - # This parameter can be used only when the +q+ query string parameter is specified. + # This parameter can be used only when the `q` query string parameter is specified. # @option arguments [String, Array] :expand_wildcards The type of index that wildcard patterns can match. # If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. - # It supports comma-separated values, such as +open,hidden+. Server default: open. - # @option arguments [Boolean] :ignore_throttled If +true+, concrete, expanded, or aliased indices are ignored when frozen. Server default: true. - # @option arguments [Boolean] :ignore_unavailable If +false+, the request returns an error if it targets a missing or closed index. - # @option arguments [Boolean] :lenient If +true+, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. - # This parameter can be used only when the +q+ query string parameter is specified. - # @option arguments [Float] :min_score The minimum +_score+ value that documents must have to be included in the result. + # It supports comma-separated values, such as `open,hidden`. Server default: open. + # @option arguments [Boolean] :ignore_throttled If `true`, concrete, expanded, or aliased indices are ignored when frozen. Server default: true. + # @option arguments [Boolean] :ignore_unavailable If `false`, the request returns an error if it targets a missing or closed index. + # @option arguments [Boolean] :lenient If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. 
+ # This parameter can be used only when the `q` query string parameter is specified. + # @option arguments [Float] :min_score The minimum `_score` value that documents must have to be included in the result. # @option arguments [String] :preference The node or shard the operation should be performed on. # By default, it is random. # @option arguments [String] :routing A custom value used to route operations to a specific shard. diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/create.rb b/elasticsearch-api/lib/elasticsearch/api/actions/create.rb index d631a1bba9..7317758ea8 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/create.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/create.rb @@ -22,78 +22,78 @@ module Elasticsearch module API module Actions # Create a new document in the index. - # You can index a new JSON document with the +//_doc/+ or +//_create/<_id>+ APIs - # Using +_create+ guarantees that the document is indexed only if it does not already exist. + # You can index a new JSON document with the `//_doc/` or `//_create/<_id>` APIs + # Using `_create` guarantees that the document is indexed only if it does not already exist. # It returns a 409 response when a document with a same ID already exists in the index. - # To update an existing document, you must use the +//_doc/+ API. + # To update an existing document, you must use the `//_doc/` API. # If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias: - # * To add a document using the +PUT //_create/<_id>+ or +POST //_create/<_id>+ request formats, you must have the +create_doc+, +create+, +index+, or +write+ index privilege. - # * To automatically create a data stream or index with this API request, you must have the +auto_configure+, +create_index+, or +manage+ index privilege. + # * To add a document using the `PUT //_create/<_id>` or `POST //_create/<_id>` request formats, you must have the `create_doc`, `create`, `index`, or `write` index privilege. + # * To automatically create a data stream or index with this API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege. # Automatic data stream creation requires a matching index template with data stream enabled. # **Automatically create data streams and indices** - # If the request's target doesn't exist and matches an index template with a +data_stream+ definition, the index operation automatically creates the data stream. + # If the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream. # If the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates. # NOTE: Elasticsearch includes several built-in index templates. To avoid naming collisions with these templates, refer to index pattern documentation. # If no mapping exists, the index operation creates a dynamic mapping. # By default, new fields and objects are automatically added to the mapping if needed. - # Automatic index creation is controlled by the +action.auto_create_index+ setting. - # If it is +true+, any index can be created automatically. - # You can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to +false+ to turn off automatic index creation entirely. 
- # Specify a comma-separated list of patterns you want to allow or prefix each pattern with +++ or +-+ to indicate whether it should be allowed or blocked. + # Automatic index creation is controlled by the `action.auto_create_index` setting. + # If it is `true`, any index can be created automatically. + # You can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely. + # Specify a comma-separated list of patterns you want to allow or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked. # When a list is specified, the default behaviour is to disallow. - # NOTE: The +action.auto_create_index+ setting affects the automatic creation of indices only. + # NOTE: The `action.auto_create_index` setting affects the automatic creation of indices only. # It does not affect the creation of data streams. # **Routing** # By default, shard placement — or routing — is controlled by using a hash of the document's ID value. - # For more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the +routing+ parameter. - # When setting up explicit mapping, you can also use the +_routing+ field to direct the index operation to extract the routing value from the document itself. + # For more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter. + # When setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself. # This does come at the (very minimal) cost of an additional document parsing pass. - # If the +_routing+ mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted. - # NOTE: Data streams do not support custom routing unless they were created with the +allow_custom_routing+ setting enabled in the template. + # If the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted. + # NOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template. # **Distributed** # The index operation is directed to the primary shard based on its route and performed on the actual node containing this shard. # After the primary shard completes the operation, if needed, the update is distributed to applicable replicas. # **Active shards** # To improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation. # If the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs. - # By default, write operations only wait for the primary shards to be active before proceeding (that is to say +wait_for_active_shards+ is +1+). - # This default can be overridden in the index settings dynamically by setting +index.write.wait_for_active_shards+. - # To alter this behavior per operation, use the +wait_for_active_shards request+ parameter. - # Valid values are all or any positive integer up to the total number of configured copies per shard in the index (which is +number_of_replicas++1). 
+ # By default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`). + # This default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`. + # To alter this behavior per operation, use the `wait_for_active_shards request` parameter. + # Valid values are all or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1). # Specifying a negative value or a number greater than the number of shard copies will throw an error. # For example, suppose you have a cluster of three nodes, A, B, and C and you create an index index with the number of replicas set to 3 (resulting in 4 shard copies, one more copy than there are nodes). # If you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding. # This means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data. - # If +wait_for_active_shards+ is set on the request to +3+ (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding. + # If `wait_for_active_shards` is set on the request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding. # This requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard. - # However, if you set +wait_for_active_shards+ to +all+ (or to +4+, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index. + # However, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index. # The operation will timeout unless a new node is brought up in the cluster to host the fourth copy of the shard. # It is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts. # After the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary. - # The +_shards+ section of the API response reveals the number of shard copies on which replication succeeded and failed. + # The `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed. # # @option arguments [String] :id A unique identifier for the document. - # To automatically generate a document ID, use the +POST //_doc/+ request format. (*Required*) + # To automatically generate a document ID, use the `POST //_doc/` request format. (*Required*) # @option arguments [String] :index The name of the data stream or index to target. - # If the target doesn't exist and matches the name or wildcard (+*+) pattern of an index template with a +data_stream+ definition, this request creates the data stream. + # If the target doesn't exist and matches the name or wildcard (`*`) pattern of an index template with a `data_stream` definition, this request creates the data stream. 
# If the target doesn't exist and doesn’t match a data stream template, this request creates the index. (*Required*)
# @option arguments [Integer] :if_primary_term Only perform the operation if the document has this primary term.
# @option arguments [Integer] :if_seq_no Only perform the operation if the document has this sequence number.
# @option arguments [Boolean] :include_source_on_error True or false if to include the document source in the error message in case of parsing errors. Server default: true.
- # @option arguments [String] :op_type Set to +create+ to only index the document if it does not already exist (put if absent).
- # If a document with the specified +_id+ already exists, the indexing operation will fail.
- # The behavior is the same as using the +/_create+ endpoint.
- # If a document ID is specified, this paramater defaults to +index+.
- # Otherwise, it defaults to +create+.
- # If the request targets a data stream, an +op_type+ of +create+ is required.
+ # @option arguments [String] :op_type Set to `create` to only index the document if it does not already exist (put if absent).
+ # If a document with the specified `_id` already exists, the indexing operation will fail.
+ # The behavior is the same as using the `/_create` endpoint.
+ # If a document ID is specified, this parameter defaults to `index`.
+ # Otherwise, it defaults to `create`.
+ # If the request targets a data stream, an `op_type` of `create` is required.
# @option arguments [String] :pipeline The ID of the pipeline to use to preprocess incoming documents.
- # If the index has a default ingest pipeline specified, setting the value to +_none+ turns off the default ingest pipeline for this request.
+ # If the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request.
# If a final pipeline is configured, it will always run regardless of the value of this parameter.
- # @option arguments [String] :refresh If +true+, Elasticsearch refreshes the affected shards to make this operation visible to search.
- # If +wait_for+, it waits for a refresh to make this operation visible to search.
- # If +false+, it does nothing with refreshes. Server default: false.
- # @option arguments [Boolean] :require_alias If +true+, the destination must be an index alias.
- # @option arguments [Boolean] :require_data_stream If +true+, the request's actions must target a data stream (existing or to be created).
+ # @option arguments [String] :refresh If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search.
+ # If `wait_for`, it waits for a refresh to make this operation visible to search.
+ # If `false`, it does nothing with refreshes. Server default: false.
+ # @option arguments [Boolean] :require_alias If `true`, the destination must be an index alias.
+ # @option arguments [Boolean] :require_data_stream If `true`, the request's actions must target a data stream (existing or to be created).
# @option arguments [String] :routing A custom value that is used to route operations to a specific shard.
# @option arguments [Time] :timeout The period the request waits for the following operations: automatic index creation, dynamic mapping updates, waiting for active shards.
# Elasticsearch waits for at least the specified timeout period before failing.
@@ -105,8 +105,8 @@ module Actions
# It must be a non-negative long number.
# @option arguments [String] :version_type The version type.
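A brief sketch of the create flow these options describe; the index name, id, and document are placeholders, and `client` is the configured client from the earlier examples:

    # Index a document only if the id does not already exist; repeating the
    # call with the same id returns a 409 conflict, as documented above.
    client.create(
      index: 'my-index',
      id: '1',
      refresh: 'wait_for',        # wait until the doc is visible to search
      wait_for_active_shards: 1,  # default: only the primary must be active
      body: { title: 'Hello', published: true }
    )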
# @option arguments [Integer, String] :wait_for_active_shards The number of shard copies that must be active before proceeding with the operation. - # You can set it to +all+ or any positive integer up to the total number of shards in the index (+number_of_replicas+1+). - # The default value of +1+ means it waits for each primary shard to be active. Server default: 1. + # You can set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). + # The default value of `1` means it waits for each primary shard to be active. Server default: 1. # @option arguments [Hash] :headers Custom HTTP headers # @option arguments [Hash] :body document # diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cross_cluster_replication/delete_auto_follow_pattern.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cross_cluster_replication/delete_auto_follow_pattern.rb index 71289336eb..43d3c0c49b 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/cross_cluster_replication/delete_auto_follow_pattern.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/cross_cluster_replication/delete_auto_follow_pattern.rb @@ -28,7 +28,7 @@ module Actions # @option arguments [String] :name The auto-follow pattern collection to delete. (*Required*) # @option arguments [Time] :master_timeout The period to wait for a connection to the master node. # If the master node is not available before the timeout expires, the request fails and returns an error. - # It can also be set to +-1+ to indicate that the request should never timeout. Server default: 30s. + # It can also be set to `-1` to indicate that the request should never timeout. Server default: 30s. # @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-delete-auto-follow-pattern diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cross_cluster_replication/follow_info.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cross_cluster_replication/follow_info.rb index 97f99ecb4d..86d3552367 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/cross_cluster_replication/follow_info.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/cross_cluster_replication/follow_info.rb @@ -29,7 +29,7 @@ module Actions # @option arguments [String, Array] :index A comma-delimited list of follower index patterns. (*Required*) # @option arguments [Time] :master_timeout The period to wait for a connection to the master node. # If the master node is not available before the timeout expires, the request fails and returns an error. - # It can also be set to +-1+ to indicate that the request should never timeout. Server default: 30s. + # It can also be set to `-1` to indicate that the request should never timeout. Server default: 30s. 
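Before moving on to the cross-cluster replication changes, a quick illustration may help tie the indexing options above together. This is a minimal sketch with the Ruby client, not part of the generated docs: the cluster URL, index name, ID, and document body are placeholders, and the `op_type`, `refresh`, and `wait_for_active_shards` values simply exercise the parameters described in the option list above.

```
require 'elasticsearch'

# Hypothetical connection details; adjust for your cluster.
client = Elasticsearch::Client.new(url: 'http://localhost:9200')

# Index a document only if the ID is not already taken, wait for two active
# shard copies, and make the document visible to search before returning.
client.index(
  index: 'my-index-000001',   # placeholder index name
  id: '1',
  op_type: 'create',          # fail if a document with this _id already exists
  wait_for_active_shards: 2,
  refresh: 'wait_for',
  body: { user: 'kimchy', message: 'trying out the index API' }
)
```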
# @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-follow-info diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cross_cluster_replication/get_auto_follow_pattern.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cross_cluster_replication/get_auto_follow_pattern.rb index 389006f732..96ff0fc329 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/cross_cluster_replication/get_auto_follow_pattern.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/cross_cluster_replication/get_auto_follow_pattern.rb @@ -29,7 +29,7 @@ module Actions # If you do not specify a name, the API returns information for all collections. # @option arguments [Time] :master_timeout The period to wait for a connection to the master node. # If the master node is not available before the timeout expires, the request fails and returns an error. - # It can also be set to +-1+ to indicate that the request should never timeout. Server default: 30s. + # It can also be set to `-1` to indicate that the request should never timeout. Server default: 30s. # @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-get-auto-follow-pattern-1 diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cross_cluster_replication/pause_auto_follow_pattern.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cross_cluster_replication/pause_auto_follow_pattern.rb index ae3e7e1607..701626ba5d 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/cross_cluster_replication/pause_auto_follow_pattern.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/cross_cluster_replication/pause_auto_follow_pattern.rb @@ -33,7 +33,7 @@ module Actions # @option arguments [String] :name The name of the auto-follow pattern to pause. (*Required*) # @option arguments [Time] :master_timeout The period to wait for a connection to the master node. # If the master node is not available before the timeout expires, the request fails and returns an error. - # It can also be set to +-1+ to indicate that the request should never timeout. Server default: 30s. + # It can also be set to `-1` to indicate that the request should never timeout. Server default: 30s. # @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-pause-auto-follow-pattern diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cross_cluster_replication/pause_follow.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cross_cluster_replication/pause_follow.rb index 0e48a15183..0de5273bc2 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/cross_cluster_replication/pause_follow.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/cross_cluster_replication/pause_follow.rb @@ -31,7 +31,7 @@ module Actions # @option arguments [String] :index The name of the follower index. (*Required*) # @option arguments [Time] :master_timeout The period to wait for a connection to the master node. # If the master node is not available before the timeout expires, the request fails and returns an error. - # It can also be set to +-1+ to indicate that the request should never timeout. Server default: 30s. + # It can also be set to `-1` to indicate that the request should never timeout. Server default: 30s. 
# @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-pause-follow diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cross_cluster_replication/resume_auto_follow_pattern.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cross_cluster_replication/resume_auto_follow_pattern.rb index c2d10db56a..ef3e36f816 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/cross_cluster_replication/resume_auto_follow_pattern.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/cross_cluster_replication/resume_auto_follow_pattern.rb @@ -30,7 +30,7 @@ module Actions # @option arguments [String] :name The name of the auto-follow pattern to resume. (*Required*) # @option arguments [Time] :master_timeout The period to wait for a connection to the master node. # If the master node is not available before the timeout expires, the request fails and returns an error. - # It can also be set to +-1+ to indicate that the request should never timeout. Server default: 30s. + # It can also be set to `-1` to indicate that the request should never timeout. Server default: 30s. # @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-resume-auto-follow-pattern diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cross_cluster_replication/stats.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cross_cluster_replication/stats.rb index c43c2431b2..5d27169711 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/cross_cluster_replication/stats.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/cross_cluster_replication/stats.rb @@ -27,7 +27,7 @@ module Actions # # @option arguments [Time] :master_timeout The period to wait for a connection to the master node. # If the master node is not available before the timeout expires, the request fails and returns an error. - # It can also be set to +-1+ to indicate that the request should never timeout. Server default: 30s. + # It can also be set to `-1` to indicate that the request should never timeout. Server default: 30s. # @option arguments [Time] :timeout The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. Server default: 30s. # @option arguments [Hash] :headers Custom HTTP headers # diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cross_cluster_replication/unfollow.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cross_cluster_replication/unfollow.rb index 08d8fc3e02..4a79112014 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/cross_cluster_replication/unfollow.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/cross_cluster_replication/unfollow.rb @@ -30,7 +30,7 @@ module Actions # @option arguments [String] :index The name of the follower index. (*Required*) # @option arguments [Time] :master_timeout The period to wait for a connection to the master node. # If the master node is not available before the timeout expires, the request fails and returns an error. - # It can also be set to +-1+ to indicate that the request should never timeout. Server default: 30s. + # It can also be set to `-1` to indicate that the request should never timeout. Server default: 30s. 
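The cross-cluster replication updates above all touch the same follower-management actions, so a single sketch covers them. The follower index name is hypothetical, a follower index generally needs to be paused and closed before `unfollow` will succeed, and error handling plus the leader-side setup are omitted.

```
require 'elasticsearch'

client = Elasticsearch::Client.new(url: 'http://localhost:9200') # placeholder URL

follower = 'follower-index' # hypothetical follower index name

# Stop replication from the leader index.
client.cross_cluster_replication.pause_follow(index: follower)

# The follower must be closed before it can be converted back into a regular index.
client.indices.close(index: follower)
client.cross_cluster_replication.unfollow(index: follower)
client.indices.open(index: follower)
```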
# @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-unfollow diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/dangling_indices/delete_dangling_index.rb b/elasticsearch-api/lib/elasticsearch/api/actions/dangling_indices/delete_dangling_index.rb index e480bd03b1..5c6e756834 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/dangling_indices/delete_dangling_index.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/dangling_indices/delete_dangling_index.rb @@ -24,7 +24,7 @@ module DanglingIndices module Actions # Delete a dangling index. # If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling. - # For example, this can happen if you delete more than +cluster.indices.tombstones.size+ indices while an Elasticsearch node is offline. + # For example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline. # # @option arguments [String] :index_uuid The UUID of the index to delete. Use the get dangling indices API to find the UUID. (*Required*) # @option arguments [Boolean] :accept_data_loss This parameter must be set to true to acknowledge that it will no longer be possible to recove data from the dangling index. (*Required*) diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/dangling_indices/import_dangling_index.rb b/elasticsearch-api/lib/elasticsearch/api/actions/dangling_indices/import_dangling_index.rb index ff9ab687c8..dbccde6a9d 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/dangling_indices/import_dangling_index.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/dangling_indices/import_dangling_index.rb @@ -24,7 +24,7 @@ module DanglingIndices module Actions # Import a dangling index. # If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling. - # For example, this can happen if you delete more than +cluster.indices.tombstones.size+ indices while an Elasticsearch node is offline. + # For example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline. # # @option arguments [String] :index_uuid The UUID of the index to import. Use the get dangling indices API to locate the UUID. (*Required*) # @option arguments [Boolean] :accept_data_loss This parameter must be set to true to import a dangling index. diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/dangling_indices/list_dangling_indices.rb b/elasticsearch-api/lib/elasticsearch/api/actions/dangling_indices/list_dangling_indices.rb index 18cf82e5f4..ef501e521d 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/dangling_indices/list_dangling_indices.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/dangling_indices/list_dangling_indices.rb @@ -24,7 +24,7 @@ module DanglingIndices module Actions # Get the dangling indices. # If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling. - # For example, this can happen if you delete more than +cluster.indices.tombstones.size+ indices while an Elasticsearch node is offline. + # For example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline. 
# Use this API to list dangling indices, which you can then import or delete. # # @option arguments [Hash] :headers Custom HTTP headers diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/delete.rb b/elasticsearch-api/lib/elasticsearch/api/actions/delete.rb index 37367fe4c4..523e8c1670 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/delete.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/delete.rb @@ -26,21 +26,23 @@ module Actions # NOTE: You cannot send deletion requests directly to a data stream. # To delete a document in a data stream, you must target the backing index containing the document. # **Optimistic concurrency control** - # Delete operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the +if_seq_no+ and +if_primary_term+ parameters. - # If a mismatch is detected, the operation will result in a +VersionConflictException+ and a status code of +409+. + # Delete operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the `if_seq_no` and `if_primary_term` parameters. + # If a mismatch is detected, the operation will result in a `VersionConflictException` and a status code of `409`. # **Versioning** # Each document indexed is versioned. # When deleting a document, the version can be specified to make sure the relevant document you are trying to delete is actually being deleted and it has not changed in the meantime. # Every write operation run on a document, deletes included, causes its version to be incremented. # The version number of a deleted document remains available for a short time after deletion to allow for control of concurrent operations. - # The length of time for which a deleted document's version remains available is determined by the +index.gc_deletes+ index setting. + # The length of time for which a deleted document's version remains available is determined by the `index.gc_deletes` index setting. # **Routing** # If routing is used during indexing, the routing value also needs to be specified to delete a document. - # If the +_routing+ mapping is set to +required+ and no routing value is specified, the delete API throws a +RoutingMissingException+ and rejects the request. + # If the `_routing` mapping is set to `required` and no routing value is specified, the delete API throws a `RoutingMissingException` and rejects the request. # For example: - # + + # + # ``` # DELETE /my-index-000001/_doc/1?routing=shard-1 - # + + # ``` + # # This request deletes the document with ID 1, but it is routed based on the user. # The document is not deleted if the correct routing is not specified. # **Distributed** @@ -51,9 +53,9 @@ module Actions # @option arguments [String] :index The name of the target index. (*Required*) # @option arguments [Integer] :if_primary_term Only perform the operation if the document has this primary term. # @option arguments [Integer] :if_seq_no Only perform the operation if the document has this sequence number. - # @option arguments [String] :refresh If +true+, Elasticsearch refreshes the affected shards to make this operation visible to search. - # If +wait_for+, it waits for a refresh to make this operation visible to search. - # If +false+, it does nothing with refreshes. Server default: false. 
+ # @option arguments [String] :refresh If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. + # If `wait_for`, it waits for a refresh to make this operation visible to search. + # If `false`, it does nothing with refreshes. Server default: false. # @option arguments [String] :routing A custom value used to route operations to a specific shard. # @option arguments [Time] :timeout The period to wait for active shards.This parameter is useful for situations where the primary shard assigned to perform the delete operation might not be available when the delete operation runs. # Some reasons for this might be that the primary shard is currently recovering from a store or undergoing relocation. @@ -62,8 +64,8 @@ module Actions # It must match the current version of the document for the request to succeed. # @option arguments [String] :version_type The version type. # @option arguments [Integer, String] :wait_for_active_shards The minimum number of shard copies that must be active before proceeding with the operation. - # You can set it to +all+ or any positive integer up to the total number of shards in the index (+number_of_replicas+1+). - # The default value of +1+ means it waits for each primary shard to be active. Server default: 1. + # You can set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). + # The default value of `1` means it waits for each primary shard to be active. Server default: 1. # @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/delete_by_query.rb b/elasticsearch-api/lib/elasticsearch/api/actions/delete_by_query.rb index c7489af433..4982a44633 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/delete_by_query.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/delete_by_query.rb @@ -24,8 +24,8 @@ module Actions # Delete documents. # Deletes documents that match the specified query. # If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or alias: - # * +read+ - # * +delete+ or +write+ + # * `read` + # * `delete` or `write` # You can specify the query criteria in the request URI or the request body using the same syntax as the search API. # When you submit a delete by query request, Elasticsearch gets a snapshot of the data stream or index when it begins processing the request and deletes matching documents using internal versioning. # If a document changes between the time that the snapshot is taken and the delete operation is processed, it results in a version conflict and the delete operation fails. @@ -35,80 +35,84 @@ module Actions # If a search or bulk request is rejected, the requests are retried up to 10 times, with exponential back off. # If the maximum retry limit is reached, processing halts and all failed requests are returned in the response. # Any delete requests that completed successfully still stick, they are not rolled back. - # You can opt to count version conflicts instead of halting and returning by setting +conflicts+ to +proceed+. - # Note that if you opt to count version conflicts the operation could attempt to delete more documents from the source than +max_docs+ until it has successfully deleted +max_docs documents+, or it has gone through every document in the source query. 
+ # You can opt to count version conflicts instead of halting and returning by setting `conflicts` to `proceed`. + # Note that if you opt to count version conflicts the operation could attempt to delete more documents from the source than `max_docs` until it has successfully deleted `max_docs` documents, or it has gone through every document in the source query. # **Throttling delete requests** - # To control the rate at which delete by query issues batches of delete operations, you can set +requests_per_second+ to any positive decimal number. + # To control the rate at which delete by query issues batches of delete operations, you can set `requests_per_second` to any positive decimal number. # This pads each batch with a wait time to throttle the rate. - # Set +requests_per_second+ to +-1+ to disable throttling. + # Set `requests_per_second` to `-1` to disable throttling. # Throttling uses a wait time between batches so that the internal scroll requests can be given a timeout that takes the request padding into account. - # The padding time is the difference between the batch size divided by the +requests_per_second+ and the time spent writing. - # By default the batch size is +1000+, so if +requests_per_second+ is set to +500+: - # + + # The padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing. + # By default the batch size is `1000`, so if `requests_per_second` is set to `500`: + # + # ``` # target_time = 1000 / 500 per second = 2 seconds # wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds - # + - # Since the batch is issued as a single +_bulk+ request, large batch sizes cause Elasticsearch to create many requests and wait before starting the next set. + # ``` + # + # Since the batch is issued as a single `_bulk` request, large batch sizes cause Elasticsearch to create many requests and wait before starting the next set. # This is "bursty" instead of "smooth". # **Slicing** # Delete by query supports sliced scroll to parallelize the delete process. # This can improve efficiency and provide a convenient way to break the request down into smaller parts. - # Setting +slices+ to +auto+ lets Elasticsearch choose the number of slices to use. + # Setting `slices` to `auto` lets Elasticsearch choose the number of slices to use. # This setting will use one slice per shard, up to a certain limit. # If there are multiple source data streams or indices, it will choose the number of slices based on the index or backing index with the smallest number of shards. # Adding slices to the delete by query operation creates sub-requests which means it has some quirks: # * You can see these requests in the tasks APIs. These sub-requests are "child" tasks of the task for the request with slices. # * Fetching the status of the task for the request with slices only contains the status of completed slices. # * These sub-requests are individually addressable for things like cancellation and rethrottling. - # * Rethrottling the request with +slices+ will rethrottle the unfinished sub-request proportionally. - # * Canceling the request with +slices+ will cancel each sub-request. - # * Due to the nature of +slices+ each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution.
- # * Parameters like +requests_per_second+ and +max_docs+ on a request with +slices+ are distributed proportionally to each sub-request. Combine that with the earlier point about distribution being uneven and you should conclude that using +max_docs+ with +slices+ might not result in exactly +max_docs+ documents being deleted. + # * Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally. + # * Canceling the request with `slices` will cancel each sub-request. + # * Due to the nature of `slices` each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution. + # * Parameters like `requests_per_second` and `max_docs` on a request with `slices` are distributed proportionally to each sub-request. Combine that with the earlier point about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being deleted. # * Each sub-request gets a slightly different snapshot of the source data stream or index though these are all taken at approximately the same time. # If you're slicing manually or otherwise tuning automatic slicing, keep in mind that: - # * Query performance is most efficient when the number of slices is equal to the number of shards in the index or backing index. If that number is large (for example, 500), choose a lower number as too many +slices+ hurts performance. Setting +slices+ higher than the number of shards generally does not improve efficiency and adds overhead. + # * Query performance is most efficient when the number of slices is equal to the number of shards in the index or backing index. If that number is large (for example, 500), choose a lower number as too many `slices` hurts performance. Setting `slices` higher than the number of shards generally does not improve efficiency and adds overhead. # * Delete performance scales linearly across available resources with the number of slices. # Whether query or delete performance dominates the runtime depends on the documents being reindexed and cluster resources. # **Cancel a delete by query operation** # Any delete by query can be canceled using the task cancel API. For example: - # + + # + # ``` # POST _tasks/r1A2WoRbTwKZ516z6NEs5A:36619/_cancel - # + + # ``` + # # The task ID can be found by using the get tasks API. # Cancellation should happen quickly but might take a few seconds. # The get task status API will continue to list the delete by query task until this task checks that it has been cancelled and terminates itself. # # @option arguments [String, Array] :index A comma-separated list of data streams, indices, and aliases to search. - # It supports wildcards (+*+). - # To search all data streams or indices, omit this parameter or use +*+ or +_all+. (*Required*) - # @option arguments [Boolean] :allow_no_indices If +false+, the request returns an error if any wildcard expression, index alias, or +_all+ value targets only missing or closed indices. + # It supports wildcards (`*`). + # To search all data streams or indices, omit this parameter or use `*` or `_all`. (*Required*) + # @option arguments [Boolean] :allow_no_indices If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. # This behavior applies even if the request targets other open indices. 
- # For example, a request targeting +foo*,bar*+ returns an error if an index starts with +foo+ but no index starts with +bar+. Server default: true. + # For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. Server default: true. # @option arguments [String] :analyzer Analyzer to use for the query string. - # This parameter can be used only when the +q+ query string parameter is specified. - # @option arguments [Boolean] :analyze_wildcard If +true+, wildcard and prefix queries are analyzed. - # This parameter can be used only when the +q+ query string parameter is specified. - # @option arguments [String] :conflicts What to do if delete by query hits version conflicts: +abort+ or +proceed+. Server default: abort. - # @option arguments [String] :default_operator The default operator for query string query: +AND+ or +OR+. - # This parameter can be used only when the +q+ query string parameter is specified. Server default: OR. + # This parameter can be used only when the `q` query string parameter is specified. + # @option arguments [Boolean] :analyze_wildcard If `true`, wildcard and prefix queries are analyzed. + # This parameter can be used only when the `q` query string parameter is specified. + # @option arguments [String] :conflicts What to do if delete by query hits version conflicts: `abort` or `proceed`. Server default: abort. + # @option arguments [String] :default_operator The default operator for query string query: `AND` or `OR`. + # This parameter can be used only when the `q` query string parameter is specified. Server default: OR. # @option arguments [String] :df The field to use as default where no field prefix is given in the query string. - # This parameter can be used only when the +q+ query string parameter is specified. + # This parameter can be used only when the `q` query string parameter is specified. # @option arguments [String, Array] :expand_wildcards The type of index that wildcard patterns can match. # If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. - # It supports comma-separated values, such as +open,hidden+. Server default: open. + # It supports comma-separated values, such as `open,hidden`. Server default: open. # @option arguments [Integer] :from Skips the specified number of documents. Server default: 0. - # @option arguments [Boolean] :ignore_unavailable If +false+, the request returns an error if it targets a missing or closed index. - # @option arguments [Boolean] :lenient If +true+, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. - # This parameter can be used only when the +q+ query string parameter is specified. + # @option arguments [Boolean] :ignore_unavailable If `false`, the request returns an error if it targets a missing or closed index. + # @option arguments [Boolean] :lenient If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. + # This parameter can be used only when the `q` query string parameter is specified. # @option arguments [Integer] :max_docs The maximum number of documents to process. # Defaults to all documents. - # When set to a value less then or equal to +scroll_size+, a scroll will not be used to retrieve the results for the operation. + # When set to a value less than or equal to `scroll_size`, a scroll will not be used to retrieve the results for the operation.
# @option arguments [String] :preference The node or shard the operation should be performed on. # It is random by default. - # @option arguments [Boolean] :refresh If +true+, Elasticsearch refreshes all shards involved in the delete by query after the request completes. - # This is different than the delete API's +refresh+ parameter, which causes just the shard that received the delete request to be refreshed. - # Unlike the delete API, it does not support +wait_for+. - # @option arguments [Boolean] :request_cache If +true+, the request cache is used for this request. + # @option arguments [Boolean] :refresh If `true`, Elasticsearch refreshes all shards involved in the delete by query after the request completes. + # This is different than the delete API's `refresh` parameter, which causes just the shard that received the delete request to be refreshed. + # Unlike the delete API, it does not support `wait_for`. + # @option arguments [Boolean] :request_cache If `true`, the request cache is used for this request. # Defaults to the index-level setting. # @option arguments [Float] :requests_per_second The throttle for this request in sub-requests per second. Server default: -1. # @option arguments [String] :routing A custom value used to route operations to a specific shard. @@ -118,10 +122,10 @@ module Actions # @option arguments [Time] :search_timeout The explicit timeout for each search request. # It defaults to no timeout. # @option arguments [String] :search_type The type of the search operation. - # Available options include +query_then_fetch+ and +dfs_query_then_fetch+. + # Available options include `query_then_fetch` and `dfs_query_then_fetch`. # @option arguments [Integer, String] :slices The number of slices this task should be divided into. Server default: 1. - # @option arguments [Array] :sort A comma-separated list of +:+ pairs. - # @option arguments [Array] :stats The specific +tag+ of the request for logging and statistical purposes. + # @option arguments [Array] :sort A comma-separated list of `:` pairs. + # @option arguments [Array] :stats The specific `tag` of the request for logging and statistical purposes. # @option arguments [Integer] :terminate_after The maximum number of documents to collect for each shard. # If a query reaches this limit, Elasticsearch terminates the query early. # Elasticsearch collects documents before sorting.Use with caution. @@ -129,12 +133,12 @@ module Actions # When possible, let Elasticsearch perform early termination automatically. # Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers. # @option arguments [Time] :timeout The period each deletion request waits for active shards. Server default: 1m. - # @option arguments [Boolean] :version If +true+, returns the document version as part of a hit. + # @option arguments [Boolean] :version If `true`, returns the document version as part of a hit. # @option arguments [Integer, String] :wait_for_active_shards The number of shard copies that must be active before proceeding with the operation. - # Set to +all+ or any positive integer up to the total number of shards in the index (+number_of_replicas+1+). - # The +timeout+ value controls how long each write request waits for unavailable shards to become available. Server default: 1. - # @option arguments [Boolean] :wait_for_completion If +true+, the request blocks until the operation is complete. 
- # If +false+, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to cancel or get the status of the task. Elasticsearch creates a record of this task as a document at +.tasks/task/${taskId}+. When you are done with a task, you should delete the task document so Elasticsearch can reclaim the space. Server default: true. + # Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). + # The `timeout` value controls how long each write request waits for unavailable shards to become available. Server default: 1. + # @option arguments [Boolean] :wait_for_completion If `true`, the request blocks until the operation is complete. + # If `false`, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to cancel or get the status of the task. Elasticsearch creates a record of this task as a document at `.tasks/task/${taskId}`. When you are done with a task, you should delete the task document so Elasticsearch can reclaim the space. Server default: true. # @option arguments [Hash] :headers Custom HTTP headers # @option arguments [Hash] :body request body # diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/delete_by_query_rethrottle.rb b/elasticsearch-api/lib/elasticsearch/api/actions/delete_by_query_rethrottle.rb index b22377cca1..d94d59f53b 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/delete_by_query_rethrottle.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/delete_by_query_rethrottle.rb @@ -27,7 +27,7 @@ module Actions # # @option arguments [String, Integer] :task_id The ID for the task. (*Required*) # @option arguments [Float] :requests_per_second The throttle for this request in sub-requests per second. - # To disable throttling, set it to +-1+. + # To disable throttling, set it to `-1`. # @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete-by-query-rethrottle diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/delete_script.rb b/elasticsearch-api/lib/elasticsearch/api/actions/delete_script.rb index f224d397f3..8668018305 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/delete_script.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/delete_script.rb @@ -27,10 +27,10 @@ module Actions # @option arguments [String] :id The identifier for the stored script or search template. (*Required*) # @option arguments [Time] :master_timeout The period to wait for a connection to the master node. # If no response is received before the timeout expires, the request fails and returns an error. - # It can also be set to +-1+ to indicate that the request should never timeout. Server default: 30s. + # It can also be set to `-1` to indicate that the request should never timeout. Server default: 30s. # @option arguments [Time] :timeout The period to wait for a response. # If no response is received before the timeout expires, the request fails and returns an error. - # It can also be set to +-1+ to indicate that the request should never timeout. Server default: 30s. + # It can also be set to `-1` to indicate that the request should never timeout. Server default: 30s. 
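As a sketch of the delete-by-query workflow documented above, the call below starts the operation as a task instead of blocking, counts version conflicts instead of aborting, and is rethrottled later. The index name and query body are hypothetical; the parameters mirror the option list above.

```
require 'elasticsearch'

client = Elasticsearch::Client.new(url: 'http://localhost:9200') # placeholder URL

# Run the delete by query asynchronously, tolerating version conflicts and
# letting Elasticsearch choose the number of slices.
response = client.delete_by_query(
  index: 'my-index-000001',   # placeholder index
  conflicts: 'proceed',
  slices: 'auto',
  wait_for_completion: false,
  requests_per_second: 100,
  body: { query: { match: { 'user.id' => 'kimchy' } } } # hypothetical query
)

task_id = response['task']

# Later, remove the throttle on the running task (-1 disables throttling).
client.delete_by_query_rethrottle(task_id: task_id, requests_per_second: -1)
```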
# @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete-script diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/enrich/execute_policy.rb b/elasticsearch-api/lib/elasticsearch/api/actions/enrich/execute_policy.rb index e37f8e2dc3..373a99ab81 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/enrich/execute_policy.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/enrich/execute_policy.rb @@ -27,7 +27,7 @@ module Actions # # @option arguments [String] :name Enrich policy to execute. (*Required*) # @option arguments [Time] :master_timeout Period to wait for a connection to the master node. Server default: 30s. - # @option arguments [Boolean] :wait_for_completion If +true+, the request blocks other enrich policy execution requests until complete. Server default: true. + # @option arguments [Boolean] :wait_for_completion If `true`, the request blocks other enrich policy execution requests until complete. Server default: true. # @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-execute-policy diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/eql/delete.rb b/elasticsearch-api/lib/elasticsearch/api/actions/eql/delete.rb index 64d344296b..7291de2c91 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/eql/delete.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/eql/delete.rb @@ -28,7 +28,7 @@ module Actions # # @option arguments [String] :id Identifier for the search to delete. # A search ID is provided in the EQL search API's response for an async search. - # A search ID is also provided if the request’s +keep_on_completion+ parameter is +true+. (*Required*) + # A search ID is also provided if the request’s `keep_on_completion` parameter is `true`. (*Required*) # @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-delete diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/esql/async_query.rb b/elasticsearch-api/lib/elasticsearch/api/actions/esql/async_query.rb index 0a7b9bf844..d8f7246a12 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/esql/async_query.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/esql/async_query.rb @@ -26,19 +26,19 @@ module Actions # Asynchronously run an ES|QL (Elasticsearch query language) query, monitor its progress, and retrieve results when they become available. # The API accepts the same parameters and request body as the synchronous query API, along with additional async related properties. # - # @option arguments [Boolean] :allow_partial_results If +true+, partial results will be returned if there are shard failures, but the query can continue to execute on other clusters and shards. - # If +false+, the query will fail if there are any failures.To override the default behavior, you can set the +esql.query.allow_partial_results+ cluster setting to +false+. Server default: true. + # @option arguments [Boolean] :allow_partial_results If `true`, partial results will be returned if there are shard failures, but the query can continue to execute on other clusters and shards. + # If `false`, the query will fail if there are any failures.To override the default behavior, you can set the `esql.query.allow_partial_results` cluster setting to `false`. Server default: true. 
# @option arguments [String] :delimiter The character to use between values within a CSV row. # It is valid only for the CSV format. - # @option arguments [Boolean] :drop_null_columns Indicates whether columns that are entirely +null+ will be removed from the +columns+ and +values+ portion of the results. - # If +true+, the response will include an extra section under the name +all_columns+ which has the name of all the columns. - # @option arguments [String] :format A short version of the Accept header, for example +json+ or +yaml+. + # @option arguments [Boolean] :drop_null_columns Indicates whether columns that are entirely `null` will be removed from the `columns` and `values` portion of the results. + # If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns. + # @option arguments [String] :format A short version of the Accept header, for example `json` or `yaml`. # @option arguments [Time] :keep_alive The period for which the query and its results are stored in the cluster. # The default period is five days. # When this period expires, the query and its results are deleted, even if the query is still ongoing. - # If the +keep_on_completion+ parameter is false, Elasticsearch only stores async queries that do not complete within the period set by the +wait_for_completion_timeout+ parameter, regardless of this value. Server default: 5d. + # If the `keep_on_completion` parameter is false, Elasticsearch only stores async queries that do not complete within the period set by the `wait_for_completion_timeout` parameter, regardless of this value. Server default: 5d. # @option arguments [Boolean] :keep_on_completion Indicates whether the query and its results are stored in the cluster. - # If false, the query and its results are stored in the cluster only if the request does not complete during the period set by the +wait_for_completion_timeout+ parameter. + # If false, the query and its results are stored in the cluster only if the request does not complete during the period set by the `wait_for_completion_timeout` parameter. # @option arguments [Time] :wait_for_completion_timeout The period to wait for the request to finish. # By default, the request waits for 1 second for the query results. # If the query completes during this period, results are returned diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/esql/async_query_delete.rb b/elasticsearch-api/lib/elasticsearch/api/actions/esql/async_query_delete.rb index baaece9b87..d213517e52 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/esql/async_query_delete.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/esql/async_query_delete.rb @@ -27,11 +27,11 @@ module Actions # Otherwise, the stored results are deleted. # If the Elasticsearch security features are enabled, only the following users can use this API to delete a query: # * The authenticated user that submitted the original query request - # * Users with the +cancel_task+ cluster privilege + # * Users with the `cancel_task` cluster privilege # # @option arguments [String] :id The unique identifier of the query. # A query ID is provided in the ES|QL async query API response for a query that does not complete in the designated time. - # A query ID is also provided when the request was submitted with the +keep_on_completion+ parameter set to +true+. (*Required*) + # A query ID is also provided when the request was submitted with the `keep_on_completion` parameter set to `true`. 
(*Required*) # @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-esql-async-query-delete diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/esql/async_query_get.rb b/elasticsearch-api/lib/elasticsearch/api/actions/esql/async_query_get.rb index 481f3d093f..81bb6438fe 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/esql/async_query_get.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/esql/async_query_get.rb @@ -28,15 +28,15 @@ module Actions # # @option arguments [String] :id The unique identifier of the query. # A query ID is provided in the ES|QL async query API response for a query that does not complete in the designated time. - # A query ID is also provided when the request was submitted with the +keep_on_completion+ parameter set to +true+. (*Required*) - # @option arguments [Boolean] :drop_null_columns Indicates whether columns that are entirely +null+ will be removed from the +columns+ and +values+ portion of the results. - # If +true+, the response will include an extra section under the name +all_columns+ which has the name of all the columns. + # A query ID is also provided when the request was submitted with the `keep_on_completion` parameter set to `true`. (*Required*) + # @option arguments [Boolean] :drop_null_columns Indicates whether columns that are entirely `null` will be removed from the `columns` and `values` portion of the results. + # If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns. # @option arguments [Time] :keep_alive The period for which the query and its results are stored in the cluster. # When this period expires, the query and its results are deleted, even if the query is still ongoing. # @option arguments [Time] :wait_for_completion_timeout The period to wait for the request to finish. # By default, the request waits for complete query results. # If the request completes during the period specified in this parameter, complete query results are returned. - # Otherwise, the response returns an +is_running+ value of +true+ and no results. + # Otherwise, the response returns an `is_running` value of `true` and no results. # @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-esql-async-query-get diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/esql/async_query_stop.rb b/elasticsearch-api/lib/elasticsearch/api/actions/esql/async_query_stop.rb index 56ad48971c..67650d35c6 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/esql/async_query_stop.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/esql/async_query_stop.rb @@ -28,9 +28,9 @@ module Actions # # @option arguments [String] :id The unique identifier of the query. # A query ID is provided in the ES|QL async query API response for a query that does not complete in the designated time. - # A query ID is also provided when the request was submitted with the +keep_on_completion+ parameter set to +true+. (*Required*) - # @option arguments [Boolean] :drop_null_columns Indicates whether columns that are entirely +null+ will be removed from the +columns+ and +values+ portion of the results. - # If +true+, the response will include an extra section under the name +all_columns+ which has the name of all the columns. 
+ # A query ID is also provided when the request was submitted with the `keep_on_completion` parameter set to `true`. (*Required*) + # @option arguments [Boolean] :drop_null_columns Indicates whether columns that are entirely `null` will be removed from the `columns` and `values` portion of the results. + # If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns. # @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-esql-async-query-stop diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/esql/query.rb b/elasticsearch-api/lib/elasticsearch/api/actions/esql/query.rb index cb9d328ba9..b881d829f1 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/esql/query.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/esql/query.rb @@ -30,10 +30,10 @@ module Actions # # @option arguments [String] :format A short version of the Accept header, e.g. json, yaml. # @option arguments [String] :delimiter The character to use between values within a CSV row. Only valid for the CSV format. - # @option arguments [Boolean] :drop_null_columns Should columns that are entirely +null+ be removed from the +columns+ and +values+ portion of the results? - # Defaults to +false+. If +true+ then the response will include an extra section under the name +all_columns+ which has the name of all columns. - # @option arguments [Boolean] :allow_partial_results If +true+, partial results will be returned if there are shard failures, but the query can continue to execute on other clusters and shards. - # If +false+, the query will fail if there are any failures.To override the default behavior, you can set the +esql.query.allow_partial_results+ cluster setting to +false+. Server default: true. + # @option arguments [Boolean] :drop_null_columns Should columns that are entirely `null` be removed from the `columns` and `values` portion of the results? + # Defaults to `false`. If `true` then the response will include an extra section under the name `all_columns` which has the name of all columns. + # @option arguments [Boolean] :allow_partial_results If `true`, partial results will be returned if there are shard failures, but the query can continue to execute on other clusters and shards. + # If `false`, the query will fail if there are any failures.To override the default behavior, you can set the `esql.query.allow_partial_results` cluster setting to `false`. Server default: true. # @option arguments [Hash] :headers Custom HTTP headers # @option arguments [Hash] :body request body # diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/exists.rb b/elasticsearch-api/lib/elasticsearch/api/actions/exists.rb index c1172382ee..b17d0c89d5 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/exists.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/exists.rb @@ -23,41 +23,43 @@ module API module Actions # Check a document. # Verify that a document exists. - # For example, check to see if a document with the +_id+ 0 exists: - # + + # For example, check to see if a document with the `_id` 0 exists: + # + # ``` # HEAD my-index-000001/_doc/0 - # + - # If the document exists, the API returns a status code of +200 - OK+. - # If the document doesn’t exist, the API returns +404 - Not Found+. + # ``` + # + # If the document exists, the API returns a status code of `200 - OK`. + # If the document doesn’t exist, the API returns `404 - Not Found`. 
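Stepping back to the ES|QL changes above, the asynchronous query flow can be sketched as follows. The index name and ES|QL statement are hypothetical; the `id` and `is_running` fields are read from the async query response as described in the async query docs above.

```
require 'elasticsearch'

client = Elasticsearch::Client.new(url: 'http://localhost:9200') # placeholder URL

# Submit the query asynchronously and keep the results around for later retrieval.
response = client.esql.async_query(
  format: 'json',
  keep_on_completion: true,
  wait_for_completion_timeout: '2s',
  body: { query: 'FROM my-index-000001 | LIMIT 10' } # hypothetical ES|QL query
)

# If the query did not finish within the timeout, poll it by ID.
if response['is_running']
  client.esql.async_query_get(id: response['id'], wait_for_completion_timeout: '30s')
end
```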
# **Versioning support** - # You can use the +version+ parameter to check the document only if its current version is equal to the specified one. + # You can use the `version` parameter to check the document only if its current version is equal to the specified one. # Internally, Elasticsearch has marked the old document as deleted and added an entirely new document. # The old version of the document doesn't disappear immediately, although you won't be able to access it. # Elasticsearch cleans up deleted documents in the background as you continue to index more data. # # @option arguments [String] :id A unique document identifier. (*Required*) # @option arguments [String] :index A comma-separated list of data streams, indices, and aliases. - # It supports wildcards (+*+). (*Required*) + # It supports wildcards (`*`). (*Required*) # @option arguments [String] :preference The node or shard the operation should be performed on. - # By default, the operation is randomized between the shard replicas.If it is set to +_local+, the operation will prefer to be run on a local allocated shard when possible. + # By default, the operation is randomized between the shard replicas.If it is set to `_local`, the operation will prefer to be run on a local allocated shard when possible. # If it is set to a custom value, the value is used to guarantee that the same shards will be used for the same custom value. # This can help with "jumping values" when hitting different shards in different refresh states. # A sample value can be something like the web session ID or the user name. - # @option arguments [Boolean] :realtime If +true+, the request is real-time as opposed to near-real-time. Server default: true. - # @option arguments [Boolean] :refresh If +true+, the request refreshes the relevant shards before retrieving the document. - # Setting it to +true+ should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing). + # @option arguments [Boolean] :realtime If `true`, the request is real-time as opposed to near-real-time. Server default: true. + # @option arguments [Boolean] :refresh If `true`, the request refreshes the relevant shards before retrieving the document. + # Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing). # @option arguments [String] :routing A custom value used to route operations to a specific shard. - # @option arguments [Boolean, String, Array] :_source Indicates whether to return the +_source+ field (+true+ or +false+) or lists the fields to return. + # @option arguments [Boolean, String, Array] :_source Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return. # @option arguments [String, Array] :_source_excludes A comma-separated list of source fields to exclude from the response. - # You can also use this parameter to exclude fields from the subset specified in +_source_includes+ query parameter. - # If the +_source+ parameter is +false+, this parameter is ignored. + # You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. + # If the `_source` parameter is `false`, this parameter is ignored. # @option arguments [String, Array] :_source_includes A comma-separated list of source fields to include in the response. # If this parameter is specified, only these source fields are returned. 
- # You can exclude fields from this subset using the +_source_excludes+ query parameter. - # If the +_source+ parameter is +false+, this parameter is ignored. + # You can exclude fields from this subset using the `_source_excludes` query parameter. + # If the `_source` parameter is `false`, this parameter is ignored. # @option arguments [String, Array] :stored_fields A comma-separated list of stored fields to return as part of a hit. # If no fields are specified, no stored fields are included in the response. - # If this field is specified, the +_source+ parameter defaults to +false+. + # If this field is specified, the `_source` parameter defaults to `false`. # @option arguments [Integer] :version Explicit version number for concurrency control. # The specified version must match the current version of the document for the request to succeed. # @option arguments [String] :version_type The version type. diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/exists_source.rb b/elasticsearch-api/lib/elasticsearch/api/actions/exists_source.rb index f7932922e5..4e87d1a0f3 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/exists_source.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/exists_source.rb @@ -24,21 +24,23 @@ module Actions # Check for a document source. # Check whether a document source exists in an index. # For example: - # + + # + # ``` # HEAD my-index-000001/_source/1 - # + + # ``` + # # A document's source is not available if it is disabled in the mapping. # # @option arguments [String] :id A unique identifier for the document. (*Required*) # @option arguments [String] :index A comma-separated list of data streams, indices, and aliases. - # It supports wildcards (+*+). (*Required*) + # It supports wildcards (`*`). (*Required*) # @option arguments [String] :preference The node or shard the operation should be performed on. # By default, the operation is randomized between the shard replicas. - # @option arguments [Boolean] :realtime If +true+, the request is real-time as opposed to near-real-time. Server default: true. - # @option arguments [Boolean] :refresh If +true+, the request refreshes the relevant shards before retrieving the document. - # Setting it to +true+ should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing). + # @option arguments [Boolean] :realtime If `true`, the request is real-time as opposed to near-real-time. Server default: true. + # @option arguments [Boolean] :refresh If `true`, the request refreshes the relevant shards before retrieving the document. + # Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing). # @option arguments [String] :routing A custom value used to route operations to a specific shard. - # @option arguments [Boolean, String, Array] :_source Indicates whether to return the +_source+ field (+true+ or +false+) or lists the fields to return. + # @option arguments [Boolean, String, Array] :_source Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return. # @option arguments [String, Array] :_source_excludes A comma-separated list of source fields to exclude in the response. # @option arguments [String, Array] :_source_includes A comma-separated list of source fields to include in the response. # @option arguments [Integer] :version The version number for concurrency control. 
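Both existence checks above are HEAD-style requests with no body. A minimal sketch with the Ruby client, using placeholder index and document IDs:

```
require 'elasticsearch'

client = Elasticsearch::Client.new(url: 'http://localhost:9200') # placeholder URL

# Check whether the document itself exists.
client.exists(index: 'my-index-000001', id: '0')

# Check whether the document's _source is available (it may be disabled in the mapping).
client.exists_source(index: 'my-index-000001', id: '1')
```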
diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/explain.rb b/elasticsearch-api/lib/elasticsearch/api/actions/explain.rb index 735bc41728..6d852981d6 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/explain.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/explain.rb @@ -29,26 +29,26 @@ module Actions # @option arguments [String] :index Index names that are used to limit the request. # Only a single index name can be provided to this parameter. (*Required*) # @option arguments [String] :analyzer The analyzer to use for the query string. - # This parameter can be used only when the +q+ query string parameter is specified. - # @option arguments [Boolean] :analyze_wildcard If +true+, wildcard and prefix queries are analyzed. - # This parameter can be used only when the +q+ query string parameter is specified. - # @option arguments [String] :default_operator The default operator for query string query: +AND+ or +OR+. - # This parameter can be used only when the +q+ query string parameter is specified. Server default: OR. + # This parameter can be used only when the `q` query string parameter is specified. + # @option arguments [Boolean] :analyze_wildcard If `true`, wildcard and prefix queries are analyzed. + # This parameter can be used only when the `q` query string parameter is specified. + # @option arguments [String] :default_operator The default operator for query string query: `AND` or `OR`. + # This parameter can be used only when the `q` query string parameter is specified. Server default: OR. # @option arguments [String] :df The field to use as default where no field prefix is given in the query string. - # This parameter can be used only when the +q+ query string parameter is specified. - # @option arguments [Boolean] :lenient If +true+, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. - # This parameter can be used only when the +q+ query string parameter is specified. + # This parameter can be used only when the `q` query string parameter is specified. + # @option arguments [Boolean] :lenient If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. + # This parameter can be used only when the `q` query string parameter is specified. # @option arguments [String] :preference The node or shard the operation should be performed on. # It is random by default. # @option arguments [String] :routing A custom value used to route operations to a specific shard. - # @option arguments [Boolean, String, Array] :_source +True+ or +false+ to return the +_source+ field or not or a list of fields to return. + # @option arguments [Boolean, String, Array] :_source `True` or `false` to return the `_source` field or not or a list of fields to return. # @option arguments [String, Array] :_source_excludes A comma-separated list of source fields to exclude from the response. - # You can also use this parameter to exclude fields from the subset specified in +_source_includes+ query parameter. - # If the +_source+ parameter is +false+, this parameter is ignored. + # You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. + # If the `_source` parameter is `false`, this parameter is ignored. # @option arguments [String, Array] :_source_includes A comma-separated list of source fields to include in the response. # If this parameter is specified, only these source fields are returned. 
- # You can exclude fields from this subset using the +_source_excludes+ query parameter. - # If the +_source+ parameter is +false+, this parameter is ignored. + # You can exclude fields from this subset using the `_source_excludes` query parameter. + # If the `_source` parameter is `false`, this parameter is ignored. # @option arguments [String, Array] :stored_fields A comma-separated list of stored fields to return in the response. # @option arguments [String] :q The query in the Lucene query string syntax. # @option arguments [Hash] :headers Custom HTTP headers diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/features/get_features.rb b/elasticsearch-api/lib/elasticsearch/api/actions/features/get_features.rb index 75eb2a5bb1..dde473b8a4 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/features/get_features.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/features/get_features.rb @@ -23,7 +23,7 @@ module API module Features module Actions # Get the features. - # Get a list of features that can be included in snapshots using the +feature_states+ field when creating a snapshot. + # Get a list of features that can be included in snapshots using the `feature_states` field when creating a snapshot. # You can use this API to determine which feature states to include when taking a snapshot. # By default, all feature states are included in a snapshot if that snapshot includes the global state, or none if it does not. # A feature state includes one or more system indices necessary for a given feature to function. diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/field_caps.rb b/elasticsearch-api/lib/elasticsearch/api/actions/field_caps.rb index 2f0de68202..3ff16ede2c 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/field_caps.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/field_caps.rb @@ -25,15 +25,15 @@ module Actions # Get information about the capabilities of fields among multiple indices. # For data streams, the API returns field capabilities among the stream’s backing indices. # It returns runtime fields like any other field. - # For example, a runtime field with a type of keyword is returned the same as any other field that belongs to the +keyword+ family. + # For example, a runtime field with a type of keyword is returned the same as any other field that belongs to the `keyword` family. # # @option arguments [String, Array] :index A comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (*). To target all data streams and indices, omit this parameter or use * or _all. # @option arguments [Boolean] :allow_no_indices If false, the request returns an error if any wildcard expression, index alias, - # or +_all+ value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request - # targeting +foo*,bar*+ returns an error if an index starts with foo but no index starts with bar. Server default: true. - # @option arguments [String, Array] :expand_wildcards The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as +open,hidden+. Server default: open. - # @option arguments [String, Array] :fields A comma-separated list of fields to retrieve capabilities for. Wildcard (+*+) expressions are supported. 
- # @option arguments [Boolean] :ignore_unavailable If +true+, missing or closed indices are not included in the response.
+ # or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request
+ # targeting `foo*,bar*` returns an error if an index starts with foo but no index starts with bar. Server default: true.
+ # @option arguments [String, Array] :expand_wildcards The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. Server default: open.
+ # @option arguments [String, Array] :fields A comma-separated list of fields to retrieve capabilities for. Wildcard (`*`) expressions are supported.
+ # @option arguments [Boolean] :ignore_unavailable If `true`, missing or closed indices are not included in the response.
 # @option arguments [Boolean] :include_unmapped If true, unmapped fields are included in the response.
 # @option arguments [String] :filters A comma-separated list of filters to apply to the response.
 # @option arguments [Array] :types A comma-separated list of field types to include.
diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/fleet/global_checkpoints.rb b/elasticsearch-api/lib/elasticsearch/api/actions/fleet/global_checkpoints.rb
index 1163f0d4e5..5d741ba9d3 100644
--- a/elasticsearch-api/lib/elasticsearch/api/actions/fleet/global_checkpoints.rb
+++ b/elasticsearch-api/lib/elasticsearch/api/actions/fleet/global_checkpoints.rb
@@ -28,13 +28,13 @@ module Actions
 #
 # @option arguments [Indexname, Indexalias] :index A single index or index alias that resolves to a single index. (*Required*)
 # @option arguments [Boolean] :wait_for_advance A boolean value which controls whether to wait (until the timeout) for the global checkpoints
- # to advance past the provided +checkpoints+.
+ # to advance past the provided `checkpoints`.
 # @option arguments [Boolean] :wait_for_index A boolean value which controls whether to wait (until the timeout) for the target index to exist
- # and all primary shards be active. Can only be true when +wait_for_advance+ is true.
- # @option arguments [Array] :checkpoints A comma separated list of previous global checkpoints. When used in combination with +wait_for_advance+,
+ # and all primary shards be active. Can only be true when `wait_for_advance` is true.
+ # @option arguments [Array] :checkpoints A comma-separated list of previous global checkpoints. When used in combination with `wait_for_advance`,
 # the API will only return once the global checkpoints advances past the checkpoints. Providing an empty list
 # will cause Elasticsearch to immediately return the current global checkpoints. Server default: [].
- # @option arguments [Time] :timeout Period to wait for a global checkpoints to advance past +checkpoints+. Server default: 30s.
+ # @option arguments [Time] :timeout The period to wait for the global checkpoints to advance past `checkpoints`. Server default: 30s.
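A minimal Ruby sketch of how the Fleet global checkpoints parameters documented above might be combined; the index name, checkpoint value, and client setup are illustrative assumptions:

```ruby
require 'elasticsearch'

client = Elasticsearch::Client.new(url: 'http://localhost:9200') # assumed local cluster

# Wait (up to the timeout) for the global checkpoints to advance past the values
# seen on a previous poll; an empty checkpoints list returns the current values immediately.
response = client.fleet.global_checkpoints(
  index: 'my-index-000001', # must resolve to a single index
  wait_for_advance: true,
  checkpoints: [17],        # illustrative checkpoint from an earlier call
  timeout: '30s'
)
puts response['global_checkpoints']
```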
# @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-fleet diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/fleet/msearch.rb b/elasticsearch-api/lib/elasticsearch/api/actions/fleet/msearch.rb index 4d75a5549f..f81edcf63a 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/fleet/msearch.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/fleet/msearch.rb @@ -25,7 +25,7 @@ module Actions # Run multiple Fleet searches. # Run several Fleet searches with a single API request. # The API follows the same structure as the multi search API. - # However, similar to the Fleet search API, it supports the +wait_for_checkpoints+ parameter. + # However, similar to the Fleet search API, it supports the `wait_for_checkpoints` parameter. # This functionality is Experimental and may be changed or removed # completely in a future release. Elastic will take a best effort approach # to fix any issues, but experimental features are not subject to the @@ -48,7 +48,7 @@ module Actions # Elasticsearch to immediately execute the search. Server default: []. # @option arguments [Boolean] :allow_partial_search_results If true, returns partial results if there are shard request timeouts or shard failures. # If false, returns an error with no partial results. - # Defaults to the configured cluster setting +search.default_allow_partial_results+, which is true by default. + # Defaults to the configured cluster setting `search.default_allow_partial_results`, which is true by default. # @option arguments [Hash] :headers Custom HTTP headers # @option arguments [Hash] :body searches # diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/fleet/search.rb b/elasticsearch-api/lib/elasticsearch/api/actions/fleet/search.rb index a2189664a0..434ba948e5 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/fleet/search.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/fleet/search.rb @@ -77,7 +77,7 @@ module Actions # Elasticsearch to immediately execute the search. Server default: []. # @option arguments [Boolean] :allow_partial_search_results If true, returns partial results if there are shard request timeouts or shard failures. # If false, returns an error with no partial results. - # Defaults to the configured cluster setting +search.default_allow_partial_results+, which is true by default. + # Defaults to the configured cluster setting `search.default_allow_partial_results`, which is true by default. # @option arguments [Hash] :headers Custom HTTP headers # @option arguments [Hash] :body request body # diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/get.rb b/elasticsearch-api/lib/elasticsearch/api/actions/get.rb index 22e6b397eb..7a7a573531 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/get.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/get.rb @@ -24,31 +24,39 @@ module Actions # Get a document by its ID. # Get a document and its source or stored fields from an index. # By default, this API is realtime and is not affected by the refresh rate of the index (when data will become visible for search). - # In the case where stored fields are requested with the +stored_fields+ parameter and the document has been updated but is not yet refreshed, the API will have to parse and analyze the source to extract the stored fields. - # To turn off realtime behavior, set the +realtime+ parameter to false. 
+ # In the case where stored fields are requested with the `stored_fields` parameter and the document has been updated but is not yet refreshed, the API will have to parse and analyze the source to extract the stored fields. + # To turn off realtime behavior, set the `realtime` parameter to false. # **Source filtering** - # By default, the API returns the contents of the +_source+ field unless you have used the +stored_fields+ parameter or the +_source+ field is turned off. - # You can turn off +_source+ retrieval by using the +_source+ parameter: - # + + # By default, the API returns the contents of the `_source` field unless you have used the `stored_fields` parameter or the `_source` field is turned off. + # You can turn off `_source` retrieval by using the `_source` parameter: + # + # ``` # GET my-index-000001/_doc/0?_source=false - # + - # If you only need one or two fields from the +_source+, use the +_source_includes+ or +_source_excludes+ parameters to include or filter out particular fields. + # ``` + # + # If you only need one or two fields from the `_source`, use the `_source_includes` or `_source_excludes` parameters to include or filter out particular fields. # This can be helpful with large documents where partial retrieval can save on network overhead # Both parameters take a comma separated list of fields or wildcard expressions. # For example: - # + + # + # ``` # GET my-index-000001/_doc/0?_source_includes=*.id&_source_excludes=entities - # + + # ``` + # # If you only want to specify includes, you can use a shorter notation: - # + + # + # ``` # GET my-index-000001/_doc/0?_source=*.id - # + + # ``` + # # **Routing** # If routing is used during indexing, the routing value also needs to be specified to retrieve a document. # For example: - # + + # + # ``` # GET my-index-000001/_doc/2?routing=user1 - # + + # ``` + # # This request gets the document with ID 2, but it is routed based on the user. # The document is not fetched if the correct routing is not specified. # **Distributed** @@ -57,37 +65,37 @@ module Actions # The replicas are the primary shard and its replicas within that shard ID group. # This means that the more replicas you have, the better your GET scaling will be. # **Versioning support** - # You can use the +version+ parameter to retrieve the document only if its current version is equal to the specified one. + # You can use the `version` parameter to retrieve the document only if its current version is equal to the specified one. # Internally, Elasticsearch has marked the old document as deleted and added an entirely new document. # The old version of the document doesn't disappear immediately, although you won't be able to access it. # Elasticsearch cleans up deleted documents in the background as you continue to index more data. # # @option arguments [String] :id A unique document identifier. (*Required*) # @option arguments [String] :index The name of the index that contains the document. (*Required*) - # @option arguments [Boolean] :force_synthetic_source Indicates whether the request forces synthetic +_source+. - # Use this paramater to test if the mapping supports synthetic +_source+ and to get a sense of the worst case performance. + # @option arguments [Boolean] :force_synthetic_source Indicates whether the request forces synthetic `_source`. + # Use this paramater to test if the mapping supports synthetic `_source` and to get a sense of the worst case performance. 
# Fetches with this parameter enabled will be slower than enabling synthetic source natively in the index. # @option arguments [String] :preference The node or shard the operation should be performed on. - # By default, the operation is randomized between the shard replicas.If it is set to +_local+, the operation will prefer to be run on a local allocated shard when possible. + # By default, the operation is randomized between the shard replicas.If it is set to `_local`, the operation will prefer to be run on a local allocated shard when possible. # If it is set to a custom value, the value is used to guarantee that the same shards will be used for the same custom value. # This can help with "jumping values" when hitting different shards in different refresh states. # A sample value can be something like the web session ID or the user name. - # @option arguments [Boolean] :realtime If +true+, the request is real-time as opposed to near-real-time. Server default: true. - # @option arguments [Boolean] :refresh If +true+, the request refreshes the relevant shards before retrieving the document. - # Setting it to +true+ should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing). + # @option arguments [Boolean] :realtime If `true`, the request is real-time as opposed to near-real-time. Server default: true. + # @option arguments [Boolean] :refresh If `true`, the request refreshes the relevant shards before retrieving the document. + # Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing). # @option arguments [String] :routing A custom value used to route operations to a specific shard. - # @option arguments [Boolean, String, Array] :_source Indicates whether to return the +_source+ field (+true+ or +false+) or lists the fields to return. + # @option arguments [Boolean, String, Array] :_source Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return. # @option arguments [String, Array] :_source_excludes A comma-separated list of source fields to exclude from the response. - # You can also use this parameter to exclude fields from the subset specified in +_source_includes+ query parameter. - # If the +_source+ parameter is +false+, this parameter is ignored. + # You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. + # If the `_source` parameter is `false`, this parameter is ignored. # @option arguments [String, Array] :_source_includes A comma-separated list of source fields to include in the response. # If this parameter is specified, only these source fields are returned. - # You can exclude fields from this subset using the +_source_excludes+ query parameter. - # If the +_source+ parameter is +false+, this parameter is ignored. + # You can exclude fields from this subset using the `_source_excludes` query parameter. + # If the `_source` parameter is `false`, this parameter is ignored. # @option arguments [String, Array] :stored_fields A comma-separated list of stored fields to return as part of a hit. # If no fields are specified, no stored fields are included in the response. - # If this field is specified, the +_source+ parameter defaults to +false+. - # Only leaf fields can be retrieved with the +stored_field+ option. + # If this field is specified, the `_source` parameter defaults to `false`. 
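A minimal Ruby sketch of the source filtering parameters described above for the get API; the index name, document ID, field patterns, and client URL are illustrative, not part of this change:

```ruby
require 'elasticsearch'

client = Elasticsearch::Client.new(url: 'http://localhost:9200') # assumed

# Roughly equivalent to GET my-index-000001/_doc/0?_source_includes=*.id&_source_excludes=entities
doc = client.get(
  index: 'my-index-000001',
  id: '0',
  _source_includes: '*.id',
  _source_excludes: 'entities'
)
puts doc['_source']
```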
+ # Only leaf fields can be retrieved with the `stored_field` option. # Object fields can't be returned;​if specified, the request fails. # @option arguments [Integer] :version The version number for concurrency control. # It must match the current version of the document for the request to succeed. diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/get_script.rb b/elasticsearch-api/lib/elasticsearch/api/actions/get_script.rb index d2109ac11e..8391731b42 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/get_script.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/get_script.rb @@ -27,7 +27,7 @@ module Actions # @option arguments [String] :id The identifier for the stored script or search template. (*Required*) # @option arguments [Time] :master_timeout The period to wait for the master node. # If the master node is not available before the timeout expires, the request fails and returns an error. - # It can also be set to +-1+ to indicate that the request should never timeout. Server default: . + # It can also be set to `-1` to indicate that the request should never timeout. Server default: . # @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get-script diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/get_source.rb b/elasticsearch-api/lib/elasticsearch/api/actions/get_source.rb index 086754c9df..bf95a37a31 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/get_source.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/get_source.rb @@ -24,23 +24,26 @@ module Actions # Get a document's source. # Get the source of a document. # For example: - # + + # + # ``` # GET my-index-000001/_source/1 - # + - # You can use the source filtering parameters to control which parts of the +_source+ are returned: - # + + # ``` + # + # You can use the source filtering parameters to control which parts of the `_source` are returned: + # + # ``` # GET my-index-000001/_source/1/?_source_includes=*.id&_source_excludes=entities - # + + # ``` # # @option arguments [String] :id A unique document identifier. (*Required*) # @option arguments [String] :index The name of the index that contains the document. (*Required*) # @option arguments [String] :preference The node or shard the operation should be performed on. # By default, the operation is randomized between the shard replicas. - # @option arguments [Boolean] :realtime If +true+, the request is real-time as opposed to near-real-time. Server default: true. - # @option arguments [Boolean] :refresh If +true+, the request refreshes the relevant shards before retrieving the document. - # Setting it to +true+ should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing). + # @option arguments [Boolean] :realtime If `true`, the request is real-time as opposed to near-real-time. Server default: true. + # @option arguments [Boolean] :refresh If `true`, the request refreshes the relevant shards before retrieving the document. + # Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing). # @option arguments [String] :routing A custom value used to route operations to a specific shard. - # @option arguments [Boolean, String, Array] :_source Indicates whether to return the +_source+ field (+true+ or +false+) or lists the fields to return. 
+ # @option arguments [Boolean, String, Array] :_source Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return. # @option arguments [String, Array] :_source_excludes A comma-separated list of source fields to exclude in the response. # @option arguments [String, Array] :_source_includes A comma-separated list of source fields to include in the response. # @option arguments [String, Array] :stored_fields A comma-separated list of stored fields to return as part of a hit. diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/graph/explore.rb b/elasticsearch-api/lib/elasticsearch/api/actions/graph/explore.rb index 4edfda5d6f..bb2e696b70 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/graph/explore.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/graph/explore.rb @@ -25,7 +25,7 @@ module Actions # Explore graph analytics. # Extract and summarize information about the documents and terms in an Elasticsearch data stream or index. # The easiest way to understand the behavior of this API is to use the Graph UI to explore connections. - # An initial request to the +_explore+ API contains a seed query that identifies the documents of interest and specifies the fields that define the vertices and connections you want to include in the graph. + # An initial request to the `_explore` API contains a seed query that identifies the documents of interest and specifies the fields that define the vertices and connections you want to include in the graph. # Subsequent requests enable you to spider out from one more vertices of interest. # You can exclude vertices that have already been returned. # diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/index.rb b/elasticsearch-api/lib/elasticsearch/api/actions/index.rb index 3d19167f59..d102d50763 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/index.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/index.rb @@ -26,68 +26,68 @@ module Actions # If the target is an index and the document already exists, the request updates the document and increments its version. # NOTE: You cannot use this API to send update requests for existing documents in a data stream. # If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias: - # * To add or overwrite a document using the +PUT //_doc/<_id>+ request format, you must have the +create+, +index+, or +write+ index privilege. - # * To add a document using the +POST //_doc/+ request format, you must have the +create_doc+, +create+, +index+, or +write+ index privilege. - # * To automatically create a data stream or index with this API request, you must have the +auto_configure+, +create_index+, or +manage+ index privilege. + # * To add or overwrite a document using the `PUT //_doc/<_id>` request format, you must have the `create`, `index`, or `write` index privilege. + # * To add a document using the `POST //_doc/` request format, you must have the `create_doc`, `create`, `index`, or `write` index privilege. + # * To automatically create a data stream or index with this API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege. # Automatic data stream creation requires a matching index template with data stream enabled. # NOTE: Replica shards might not all be started when an indexing operation returns successfully. - # By default, only the primary is required. 
Set +wait_for_active_shards+ to change this default behavior. + # By default, only the primary is required. Set `wait_for_active_shards` to change this default behavior. # **Automatically create data streams and indices** - # If the request's target doesn't exist and matches an index template with a +data_stream+ definition, the index operation automatically creates the data stream. + # If the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream. # If the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates. # NOTE: Elasticsearch includes several built-in index templates. To avoid naming collisions with these templates, refer to index pattern documentation. # If no mapping exists, the index operation creates a dynamic mapping. # By default, new fields and objects are automatically added to the mapping if needed. - # Automatic index creation is controlled by the +action.auto_create_index+ setting. - # If it is +true+, any index can be created automatically. - # You can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to +false+ to turn off automatic index creation entirely. - # Specify a comma-separated list of patterns you want to allow or prefix each pattern with +++ or +-+ to indicate whether it should be allowed or blocked. + # Automatic index creation is controlled by the `action.auto_create_index` setting. + # If it is `true`, any index can be created automatically. + # You can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely. + # Specify a comma-separated list of patterns you want to allow or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked. # When a list is specified, the default behaviour is to disallow. - # NOTE: The +action.auto_create_index+ setting affects the automatic creation of indices only. + # NOTE: The `action.auto_create_index` setting affects the automatic creation of indices only. # It does not affect the creation of data streams. # **Optimistic concurrency control** - # Index operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the +if_seq_no+ and +if_primary_term+ parameters. - # If a mismatch is detected, the operation will result in a +VersionConflictException+ and a status code of +409+. + # Index operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the `if_seq_no` and `if_primary_term` parameters. + # If a mismatch is detected, the operation will result in a `VersionConflictException` and a status code of `409`. # **Routing** # By default, shard placement — or routing — is controlled by using a hash of the document's ID value. - # For more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the +routing+ parameter. - # When setting up explicit mapping, you can also use the +_routing+ field to direct the index operation to extract the routing value from the document itself. 
+ # For more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter. + # When setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself. # This does come at the (very minimal) cost of an additional document parsing pass. - # If the +_routing+ mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted. - # NOTE: Data streams do not support custom routing unless they were created with the +allow_custom_routing+ setting enabled in the template. + # If the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted. + # NOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template. # **Distributed** # The index operation is directed to the primary shard based on its route and performed on the actual node containing this shard. # After the primary shard completes the operation, if needed, the update is distributed to applicable replicas. # **Active shards** # To improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation. # If the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs. - # By default, write operations only wait for the primary shards to be active before proceeding (that is to say +wait_for_active_shards+ is +1+). - # This default can be overridden in the index settings dynamically by setting +index.write.wait_for_active_shards+. - # To alter this behavior per operation, use the +wait_for_active_shards request+ parameter. - # Valid values are all or any positive integer up to the total number of configured copies per shard in the index (which is +number_of_replicas++1). + # By default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`). + # This default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`. + # To alter this behavior per operation, use the `wait_for_active_shards request` parameter. + # Valid values are all or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1). # Specifying a negative value or a number greater than the number of shard copies will throw an error. # For example, suppose you have a cluster of three nodes, A, B, and C and you create an index index with the number of replicas set to 3 (resulting in 4 shard copies, one more copy than there are nodes). # If you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding. # This means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data. - # If +wait_for_active_shards+ is set on the request to +3+ (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding. 
+ # If `wait_for_active_shards` is set on the request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding. # This requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard. - # However, if you set +wait_for_active_shards+ to +all+ (or to +4+, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index. + # However, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index. # The operation will timeout unless a new node is brought up in the cluster to host the fourth copy of the shard. # It is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts. # After the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary. - # The +_shards+ section of the API response reveals the number of shard copies on which replication succeeded and failed. + # The `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed. # **No operation (noop) updates** # When updating a document by using this API, a new version of the document is always created even if the document hasn't changed. - # If this isn't acceptable use the +_update+ API with +detect_noop+ set to +true+. - # The +detect_noop+ option isn't available on this API because it doesn’t fetch the old source and isn't able to compare it against the new source. + # If this isn't acceptable use the `_update` API with `detect_noop` set to `true`. + # The `detect_noop` option isn't available on this API because it doesn’t fetch the old source and isn't able to compare it against the new source. # There isn't a definitive rule for when noop updates aren't acceptable. # It's a combination of lots of factors like how frequently your data source sends updates that are actually noops and how many queries per second Elasticsearch runs on the shard receiving the updates. # **Versioning** # Each indexed document is given a version number. # By default, internal versioning is used that starts at 1 and increments with each update, deletes included. # Optionally, the version number can be set to an external value (for example, if maintained in a database). - # To enable this functionality, +version_type+ should be set to +external+. - # The value provided must be a numeric, long value greater than or equal to 0, and less than around +9.2e+18+. + # To enable this functionality, `version_type` should be set to `external`. + # The value provided must be a numeric, long value greater than or equal to 0, and less than around `9.2e+18`. # NOTE: Versioning is completely real time, and is not affected by the near real time aspects of search operations. # If no version is provided, the operation runs without any version checks. # When using the external version type, the system checks to see if the version number passed to the index request is greater than the version of the currently stored document. 
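A minimal Ruby sketch of the external versioning behaviour described above; the version number, index name, and client setup are illustrative assumptions:

```ruby
require 'elasticsearch'

client = Elasticsearch::Client.new(url: 'http://localhost:9200') # assumed

# With version_type external, the write is applied only if the supplied version
# is greater than the version of the currently stored document.
client.index(
  index: 'my-index-000001',
  id: '1',
  version: 5,               # illustrative externally maintained version
  version_type: 'external',
  body: { title: 'hello' }
)
```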
@@ -106,26 +106,26 @@ module Actions
 # Even the simple case of updating the Elasticsearch index using data from a database is simplified if external versioning is used, as only the latest version will be used if the index operations arrive out of order.
 #
 # @option arguments [String] :id A unique identifier for the document.
- # To automatically generate a document ID, use the +POST //_doc/+ request format and omit this parameter.
+ # To automatically generate a document ID, use the `POST //_doc/` request format and omit this parameter.
 # @option arguments [String] :index The name of the data stream or index to target.
- # If the target doesn't exist and matches the name or wildcard (+*+) pattern of an index template with a +data_stream+ definition, this request creates the data stream.
+ # If the target doesn't exist and matches the name or wildcard (`*`) pattern of an index template with a `data_stream` definition, this request creates the data stream.
 # If the target doesn't exist and doesn't match a data stream template, this request creates the index.
 # You can check for existing targets with the resolve index API. (*Required*)
 # @option arguments [Integer] :if_primary_term Only perform the operation if the document has this primary term.
 # @option arguments [Integer] :if_seq_no Only perform the operation if the document has this sequence number.
 # @option arguments [Boolean] :include_source_on_error True or false if to include the document source in the error message in case of parsing errors. Server default: true.
- # @option arguments [String] :op_type Set to +create+ to only index the document if it does not already exist (put if absent).
- # If a document with the specified +_id+ already exists, the indexing operation will fail.
- # The behavior is the same as using the +/_create+ endpoint.
- # If a document ID is specified, this paramater defaults to +index+.
- # Otherwise, it defaults to +create+.
- # If the request targets a data stream, an +op_type+ of +create+ is required.
+ # @option arguments [String] :op_type Set to `create` to only index the document if it does not already exist (put if absent).
+ # If a document with the specified `_id` already exists, the indexing operation will fail.
+ # The behavior is the same as using the `/_create` endpoint.
+ # If a document ID is specified, this parameter defaults to `index`.
+ # Otherwise, it defaults to `create`.
+ # If the request targets a data stream, an `op_type` of `create` is required.
 # @option arguments [String] :pipeline The ID of the pipeline to use to preprocess incoming documents.
- # If the index has a default ingest pipeline specified, then setting the value to +_none+ disables the default ingest pipeline for this request.
+ # If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request.
 # If a final pipeline is configured it will always run, regardless of the value of this parameter.
- # @option arguments [String] :refresh If +true+, Elasticsearch refreshes the affected shards to make this operation visible to search.
- # If +wait_for+, it waits for a refresh to make this operation visible to search.
- # If +false+, it does nothing with refreshes. Server default: false.
+ # @option arguments [String] :refresh If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search.
+ # If `wait_for`, it waits for a refresh to make this operation visible to search.
+ # If `false`, it does nothing with refreshes. Server default: false. # @option arguments [String] :routing A custom value that is used to route operations to a specific shard. # @option arguments [Time] :timeout The period the request waits for the following operations: automatic index creation, dynamic mapping updates, waiting for active shards.This parameter is useful for situations where the primary shard assigned to perform the operation might not be available when the operation runs. # Some reasons for this might be that the primary shard is currently recovering from a gateway or undergoing relocation. @@ -135,9 +135,9 @@ module Actions # It must be a non-negative long number. # @option arguments [String] :version_type The version type. # @option arguments [Integer, String] :wait_for_active_shards The number of shard copies that must be active before proceeding with the operation. - # You can set it to +all+ or any positive integer up to the total number of shards in the index (+number_of_replicas+1+). - # The default value of +1+ means it waits for each primary shard to be active. Server default: 1. - # @option arguments [Boolean] :require_alias If +true+, the destination must be an index alias. + # You can set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). + # The default value of `1` means it waits for each primary shard to be active. Server default: 1. + # @option arguments [Boolean] :require_alias If `true`, the destination must be an index alias. # @option arguments [Hash] :headers Custom HTTP headers # @option arguments [Hash] :body document # diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/index_lifecycle_management/explain_lifecycle.rb b/elasticsearch-api/lib/elasticsearch/api/actions/index_lifecycle_management/explain_lifecycle.rb index d593ac122b..4ba32d54be 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/index_lifecycle_management/explain_lifecycle.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/index_lifecycle_management/explain_lifecycle.rb @@ -27,8 +27,8 @@ module Actions # For data streams, the API retrieves the current lifecycle status for the stream's backing indices. # The response indicates when the index entered each lifecycle state, provides the definition of the running phase, and information about any failures. # - # @option arguments [String] :index Comma-separated list of data streams, indices, and aliases to target. Supports wildcards (+*+). - # To target all data streams and indices, use +*+ or +_all+. (*Required*) + # @option arguments [String] :index Comma-separated list of data streams, indices, and aliases to target. Supports wildcards (`*`). + # To target all data streams and indices, use `*` or `_all`. (*Required*) # @option arguments [Boolean] :only_errors Filters the returned indices to only indices that are managed by ILM and are in an error state, either due to an encountering an error while executing the policy, or attempting to use a policy that does not exist. # @option arguments [Boolean] :only_managed Filters the returned indices to only indices that are managed by ILM. # @option arguments [Time] :master_timeout Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. Server default: 30s. 
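A minimal Ruby sketch of the ILM explain lifecycle call documented above; the index pattern and client setup are illustrative assumptions, and the response handling assumes ILM-managed indices:

```ruby
require 'elasticsearch'

client = Elasticsearch::Client.new(url: 'http://localhost:9200') # assumed

# Ask ILM why each matching index is in its current phase, action, and step;
# only_errors narrows the output to indices stuck in an error state.
response = client.index_lifecycle_management.explain_lifecycle(
  index: 'my-index-*',
  only_errors: true
)
response['indices'].each do |name, state|
  puts "#{name}: #{state['phase']}/#{state['action']}/#{state['step']}"
end
```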
diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/index_lifecycle_management/migrate_to_data_tiers.rb b/elasticsearch-api/lib/elasticsearch/api/actions/index_lifecycle_management/migrate_to_data_tiers.rb index 684a057125..0688a40249 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/index_lifecycle_management/migrate_to_data_tiers.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/index_lifecycle_management/migrate_to_data_tiers.rb @@ -32,13 +32,13 @@ module Actions # 1. Remove custom allocation settings from existing ILM policies. # 1. Replace custom allocation settings from existing indices with the corresponding tier preference. # ILM must be stopped before performing the migration. - # Use the stop ILM and get ILM status APIs to wait until the reported operation mode is +STOPPED+. + # Use the stop ILM and get ILM status APIs to wait until the reported operation mode is `STOPPED`. # # @option arguments [Boolean] :dry_run If true, simulates the migration from node attributes based allocation filters to data tiers, but does not perform the migration. # This provides a way to retrieve the indices and ILM policies that need to be migrated. # @option arguments [Time] :master_timeout The period to wait for a connection to the master node. # If no response is received before the timeout expires, the request fails and returns an error. - # It can also be set to +-1+ to indicate that the request should never timeout. Server default: 30s. + # It can also be set to `-1` to indicate that the request should never timeout. Server default: 30s. # @option arguments [Hash] :headers Custom HTTP headers # @option arguments [Hash] :body request body # diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/index_lifecycle_management/move_to_step.rb b/elasticsearch-api/lib/elasticsearch/api/actions/index_lifecycle_management/move_to_step.rb index f9527f34dc..1790d5bf07 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/index_lifecycle_management/move_to_step.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/index_lifecycle_management/move_to_step.rb @@ -28,7 +28,7 @@ module Actions # You must specify both the current step and the step to be executed in the body of the request. # The request will fail if the current step does not match the step currently running for the index # This is to prevent the index from being moved from an unexpected step into the next step. - # When specifying the target (+next_step+) to which the index will be moved, either the name or both the action and name fields are optional. + # When specifying the target (`next_step`) to which the index will be moved, either the name or both the action and name fields are optional. # If only the phase is specified, the index will move to the first step of the first action in the target phase. # If the phase and action are specified, the index will move to the first step of the specified action in the specified phase. # Only actions specified in the ILM policy are considered valid. diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/indices/add_block.rb b/elasticsearch-api/lib/elasticsearch/api/actions/indices/add_block.rb index ee9133da0c..2c50f2f3d8 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/indices/add_block.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/indices/add_block.rb @@ -28,22 +28,22 @@ module Actions # # @option arguments [String] :index A comma-separated list or wildcard expression of index names used to limit the request. 
# By default, you must explicitly name the indices you are adding blocks to. - # To allow the adding of blocks to indices with +_all+, +*+, or other wildcard expressions, change the +action.destructive_requires_name+ setting to +false+. - # You can update this setting in the +elasticsearch.yml+ file or by using the cluster update settings API. (*Required*) + # To allow the adding of blocks to indices with `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to `false`. + # You can update this setting in the `elasticsearch.yml` file or by using the cluster update settings API. (*Required*) # @option arguments [String] :block The block type to add to the index. (*Required*) - # @option arguments [Boolean] :allow_no_indices If +false+, the request returns an error if any wildcard expression, index alias, or +_all+ value targets only missing or closed indices. + # @option arguments [Boolean] :allow_no_indices If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. # This behavior applies even if the request targets other open indices. - # For example, a request targeting +foo*,bar*+ returns an error if an index starts with +foo+ but no index starts with +bar+. Server default: true. + # For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. Server default: true. # @option arguments [String, Array] :expand_wildcards The type of index that wildcard patterns can match. # If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. - # It supports comma-separated values, such as +open,hidden+. Server default: open. - # @option arguments [Boolean] :ignore_unavailable If +false+, the request returns an error if it targets a missing or closed index. + # It supports comma-separated values, such as `open,hidden`. Server default: open. + # @option arguments [Boolean] :ignore_unavailable If `false`, the request returns an error if it targets a missing or closed index. # @option arguments [Time] :master_timeout The period to wait for the master node. # If the master node is not available before the timeout expires, the request fails and returns an error. - # It can also be set to +-1+ to indicate that the request should never timeout. Server default: 30s. + # It can also be set to `-1` to indicate that the request should never timeout. Server default: 30s. # @option arguments [Time] :timeout The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. # If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. - # It can also be set to +-1+ to indicate that the request should never timeout. Server default: 30s. + # It can also be set to `-1` to indicate that the request should never timeout. Server default: 30s. 
# @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-add-block diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/indices/analyze.rb b/elasticsearch-api/lib/elasticsearch/api/actions/indices/analyze.rb index c7f6ffe3f5..c82416c526 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/indices/analyze.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/indices/analyze.rb @@ -25,12 +25,12 @@ module Actions # Get tokens from text analysis. # The analyze API performs analysis on a text string and returns the resulting tokens. # Generating excessive amount of tokens may cause a node to run out of memory. - # The +index.analyze.max_token_count+ setting enables you to limit the number of tokens that can be produced. + # The `index.analyze.max_token_count` setting enables you to limit the number of tokens that can be produced. # If more than this limit of tokens gets generated, an error occurs. - # The +_analyze+ endpoint without a specified index will always use +10000+ as its limit. + # The `_analyze` endpoint without a specified index will always use `10000` as its limit. # # @option arguments [String] :index Index used to derive the analyzer. - # If specified, the +analyzer+ or field parameter overrides this value. + # If specified, the `analyzer` or field parameter overrides this value. # If no index is specified or the index does not have a default analyzer, the analyze API uses the standard analyzer. # @option arguments [Hash] :headers Custom HTTP headers # @option arguments [Hash] :body request body diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/indices/clear_cache.rb b/elasticsearch-api/lib/elasticsearch/api/actions/indices/clear_cache.rb index 25666554fb..051f465a2a 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/indices/clear_cache.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/indices/clear_cache.rb @@ -26,24 +26,24 @@ module Actions # Clear the cache of one or more indices. # For data streams, the API clears the caches of the stream's backing indices. # By default, the clear cache API clears all caches. - # To clear only specific caches, use the +fielddata+, +query+, or +request+ parameters. - # To clear the cache only of specific fields, use the +fields+ parameter. + # To clear only specific caches, use the `fielddata`, `query`, or `request` parameters. + # To clear the cache only of specific fields, use the `fields` parameter. # # @option arguments [String, Array] :index Comma-separated list of data streams, indices, and aliases used to limit the request. - # Supports wildcards (+*+). - # To target all data streams and indices, omit this parameter or use +*+ or +_all+. - # @option arguments [Boolean] :allow_no_indices If +false+, the request returns an error if any wildcard expression, index alias, or +_all+ value targets only missing or closed indices. + # Supports wildcards (`*`). + # To target all data streams and indices, omit this parameter or use `*` or `_all`. + # @option arguments [Boolean] :allow_no_indices If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. # This behavior applies even if the request targets other open indices. Server default: true. # @option arguments [String, Array] :expand_wildcards Type of index that wildcard patterns can match. 
# If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. - # Supports comma-separated values, such as +open,hidden+. - # Valid values are: +all+, +open+, +closed+, +hidden+, +none+. Server default: open. - # @option arguments [Boolean] :fielddata If +true+, clears the fields cache. - # Use the +fields+ parameter to clear the cache of specific fields only. - # @option arguments [String, Array] :fields Comma-separated list of field names used to limit the +fielddata+ parameter. - # @option arguments [Boolean] :ignore_unavailable If +false+, the request returns an error if it targets a missing or closed index. - # @option arguments [Boolean] :query If +true+, clears the query cache. - # @option arguments [Boolean] :request If +true+, clears the request cache. + # Supports comma-separated values, such as `open,hidden`. + # Valid values are: `all`, `open`, `closed`, `hidden`, `none`. Server default: open. + # @option arguments [Boolean] :fielddata If `true`, clears the fields cache. + # Use the `fields` parameter to clear the cache of specific fields only. + # @option arguments [String, Array] :fields Comma-separated list of field names used to limit the `fielddata` parameter. + # @option arguments [Boolean] :ignore_unavailable If `false`, the request returns an error if it targets a missing or closed index. + # @option arguments [Boolean] :query If `true`, clears the query cache. + # @option arguments [Boolean] :request If `true`, clears the request cache. # @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-clear-cache diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/indices/clone.rb b/elasticsearch-api/lib/elasticsearch/api/actions/indices/clone.rb index 61f761be3c..94765b5989 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/indices/clone.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/indices/clone.rb @@ -29,7 +29,7 @@ module Actions # The API also does not copy index metadata from the original index. # Index metadata includes aliases, index lifecycle management phase definitions, and cross-cluster replication (CCR) follower information. # For example, if you clone a CCR follower index, the resulting clone will not be a follower index. - # The clone API copies most index settings from the source index to the resulting index, with the exception of +index.number_of_replicas+ and +index.auto_expand_replicas+. + # The clone API copies most index settings from the source index to the resulting index, with the exception of `index.number_of_replicas` and `index.auto_expand_replicas`. # To set the number of replicas in the resulting index, configure these settings in the clone request. # Cloning works as follows: # * First, it creates a new target index with the same definition as the source index. @@ -42,10 +42,10 @@ module Actions # * The node handling the clone process must have sufficient free disk space to accommodate a second copy of the existing index. # The current write index on a data stream cannot be cloned. # In order to clone the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be cloned. - # NOTE: Mappings cannot be specified in the +_clone+ request. The mappings of the source index will be used for the target index. + # NOTE: Mappings cannot be specified in the `_clone` request. 
The mappings of the source index will be used for the target index. # **Monitor the cloning process** - # The cloning process can be monitored with the cat recovery API or the cluster health API can be used to wait until all primary shards have been allocated by setting the +wait_for_status+ parameter to +yellow+. - # The +_clone+ API returns as soon as the target index has been added to the cluster state, before any shards have been allocated. + # The cloning process can be monitored with the cat recovery API or the cluster health API can be used to wait until all primary shards have been allocated by setting the `wait_for_status` parameter to `yellow`. + # The `_clone` API returns as soon as the target index has been added to the cluster state, before any shards have been allocated. # At this point, all shards are in the state unassigned. # If, for any reason, the target index can't be allocated, its primary shard will remain unassigned until it can be allocated on that node. # Once the primary shard is allocated, it moves to state initializing, and the clone process begins. @@ -61,7 +61,7 @@ module Actions # @option arguments [Time] :timeout Period to wait for a response. # If no response is received before the timeout expires, the request fails and returns an error. Server default: 30s. # @option arguments [Integer, String] :wait_for_active_shards The number of shard copies that must be active before proceeding with the operation. - # Set to +all+ or any positive integer up to the total number of shards in the index (+number_of_replicas+1+). Server default: 1. + # Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). Server default: 1. # @option arguments [Hash] :headers Custom HTTP headers # @option arguments [Hash] :body request body # diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/indices/close.rb b/elasticsearch-api/lib/elasticsearch/api/actions/indices/close.rb index 0b406803bb..86b9f335bc 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/indices/close.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/indices/close.rb @@ -31,26 +31,26 @@ module Actions # The data of opened and closed indices is automatically replicated by the cluster to ensure that enough shard copies are safely kept around at all times. # You can open and close multiple indices. # An error is thrown if the request explicitly refers to a missing index. - # This behaviour can be turned off using the +ignore_unavailable=true+ parameter. + # This behaviour can be turned off using the `ignore_unavailable=true` parameter. # By default, you must explicitly name the indices you are opening or closing. - # To open or close indices with +_all+, +*+, or other wildcard expressions, change the+action.destructive_requires_name+ setting to +false+. This setting can also be changed with the cluster update settings API. + # To open or close indices with `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to `false`. This setting can also be changed with the cluster update settings API. # Closed indices consume a significant amount of disk-space which can cause problems in managed environments. - # Closing indices can be turned off with the cluster settings API by setting +cluster.indices.close.enable+ to +false+. + # Closing indices can be turned off with the cluster settings API by setting `cluster.indices.close.enable` to `false`.
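To make the clone flow above concrete, here is a hedged sketch (not part of the patch) using hypothetical index names; it assumes the source index is first made read-only with `index.blocks.write`, and it overrides the replica count because replica settings are not copied.

```
require 'elasticsearch'

client = Elasticsearch::Client.new(url: 'http://localhost:9200') # hypothetical local cluster

# Block writes on the source index, then clone it into a new target index.
client.indices.put_settings(index: 'my-index', body: { 'index.blocks.write' => true })

response = client.indices.clone(
  index: 'my-index',
  target: 'my-index-clone',
  body: { settings: { 'index.number_of_replicas' => 1 } },
  wait_for_active_shards: 1
)
puts response['acknowledged']
```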
# # @option arguments [String, Array] :index Comma-separated list or wildcard expression of index names used to limit the request. (*Required*) - # @option arguments [Boolean] :allow_no_indices If +false+, the request returns an error if any wildcard expression, index alias, or +_all+ value targets only missing or closed indices. + # @option arguments [Boolean] :allow_no_indices If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. # This behavior applies even if the request targets other open indices. Server default: true. # @option arguments [String, Array] :expand_wildcards Type of index that wildcard patterns can match. # If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. - # Supports comma-separated values, such as +open,hidden+. - # Valid values are: +all+, +open+, +closed+, +hidden+, +none+. Server default: open. - # @option arguments [Boolean] :ignore_unavailable If +false+, the request returns an error if it targets a missing or closed index. + # Supports comma-separated values, such as `open,hidden`. + # Valid values are: `all`, `open`, `closed`, `hidden`, `none`. Server default: open. + # @option arguments [Boolean] :ignore_unavailable If `false`, the request returns an error if it targets a missing or closed index. # @option arguments [Time] :master_timeout Period to wait for a connection to the master node. # If no response is received before the timeout expires, the request fails and returns an error. Server default: 30s. # @option arguments [Time] :timeout Period to wait for a response. # If no response is received before the timeout expires, the request fails and returns an error. Server default: 30s. # @option arguments [Integer, String] :wait_for_active_shards The number of shard copies that must be active before proceeding with the operation. - # Set to +all+ or any positive integer up to the total number of shards in the index (+number_of_replicas+1+). Server default: 1. + # Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). Server default: 1. # @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-close diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/indices/create.rb b/elasticsearch-api/lib/elasticsearch/api/actions/indices/create.rb index 80bf211587..493e9e5cce 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/indices/create.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/indices/create.rb @@ -31,29 +31,29 @@ module Actions # **Wait for active shards** # By default, index creation will only return a response to the client when the primary copies of each shard have been started, or the request times out. # The index creation response will indicate what happened. - # For example, +acknowledged+ indicates whether the index was successfully created in the cluster, +while shards_acknowledged+ indicates whether the requisite number of shard copies were started for each shard in the index before timing out. - # Note that it is still possible for either +acknowledged+ or +shards_acknowledged+ to be +false+, but for the index creation to be successful. 
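For the close API documented just above, a minimal illustrative call (the index name and client URL are assumptions, not part of this patch):

```
require 'elasticsearch'

client = Elasticsearch::Client.new(url: 'http://localhost:9200') # hypothetical local cluster

# Close a single, explicitly named index; a missing index is tolerated rather
# than raising an error.
response = client.indices.close(index: 'my-index', ignore_unavailable: true)
puts response['acknowledged']
```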
+ # For example, `acknowledged` indicates whether the index was successfully created in the cluster, while `shards_acknowledged` indicates whether the requisite number of shard copies were started for each shard in the index before timing out. + # Note that it is still possible for either `acknowledged` or `shards_acknowledged` to be `false`, but for the index creation to be successful. # These values simply indicate whether the operation completed before the timeout. - # If +acknowledged+ is false, the request timed out before the cluster state was updated with the newly created index, but it probably will be created sometime soon. - # If +shards_acknowledged+ is false, then the request timed out before the requisite number of shards were started (by default just the primaries), even if the cluster state was successfully updated to reflect the newly created index (that is to say, +acknowledged+ is +true+). - # You can change the default of only waiting for the primary shards to start through the index setting +index.write.wait_for_active_shards+. - # Note that changing this setting will also affect the +wait_for_active_shards+ value on all subsequent write operations. + # If `acknowledged` is false, the request timed out before the cluster state was updated with the newly created index, but it probably will be created sometime soon. + # If `shards_acknowledged` is false, then the request timed out before the requisite number of shards were started (by default just the primaries), even if the cluster state was successfully updated to reflect the newly created index (that is to say, `acknowledged` is `true`). + # You can change the default of only waiting for the primary shards to start through the index setting `index.write.wait_for_active_shards`. + # Note that changing this setting will also affect the `wait_for_active_shards` value on all subsequent write operations. # # @option arguments [String] :index Name of the index you wish to create. # Index names must meet the following criteria: # - Lowercase only - # - Cannot include +\+, +/+, +*+, +?+, +"+, +<+, +>+, +|+, ++ (space character), +,+, or +#+ - # - Indices prior to 7.0 could contain a colon (+:+), but that has been deprecated and will not be supported in later versions - # - Cannot start with +-+, +_+, or +++ - # - Cannot be +.+ or +..+ + # - Cannot include `\`, `/`, `*`, `?`, `"`, `<`, `>`, `|`, ` ` (space character), `,`, or `#` + # - Indices prior to 7.0 could contain a colon (`:`), but that has been deprecated and will not be supported in later versions + # - Cannot start with `-`, `_`, or `+` + # - Cannot be `.` or `..` # - Cannot be longer than 255 bytes (note thtat it is bytes, so multi-byte characters will reach the limit faster) - # - Names starting with +.+ are deprecated, except for hidden indices and internal indices managed by plugins (*Required*) + # - Names starting with `.` are deprecated, except for hidden indices and internal indices managed by plugins (*Required*) # @option arguments [Time] :master_timeout Period to wait for a connection to the master node. # If no response is received before the timeout expires, the request fails and returns an error. Server default: 30s. # @option arguments [Time] :timeout Period to wait for a response. # If no response is received before the timeout expires, the request fails and returns an error. Server default: 30s. # @option arguments [Integer, String] :wait_for_active_shards The number of shard copies that must be active before proceeding with the operation.
- # Set to +all+ or any positive integer up to the total number of shards in the index (+number_of_replicas+1+). Server default: 1. + # Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). Server default: 1. # @option arguments [Hash] :headers Custom HTTP headers # @option arguments [Hash] :body request body # diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/indices/create_data_stream.rb b/elasticsearch-api/lib/elasticsearch/api/actions/indices/create_data_stream.rb index 7492c972f7..58e0ff9811 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/indices/create_data_stream.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/indices/create_data_stream.rb @@ -27,9 +27,9 @@ module Actions # # @option arguments [String] :name Name of the data stream, which must meet the following criteria: # Lowercase only; - # Cannot include +\+, +/+, +*+, +?+, +"+, +<+, +>+, +|+, +,+, +#+, +:+, or a space character; - # Cannot start with +-+, +_+, +++, or +.ds-+; - # Cannot be +.+ or +..+; + # Cannot include `\`, `/`, `*`, `?`, `"`, `<`, `>`, `|`, `,`, `#`, `:`, or a space character; + # Cannot start with `-`, `_`, `+`, or `.ds-`; + # Cannot be `.` or `..`; # Cannot be longer than 255 bytes. Multi-byte characters count towards this limit faster. (*Required*) # @option arguments [Time] :master_timeout Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. Server default: 30s. # @option arguments [Time] :timeout Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. Server default: 30s. diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/indices/data_streams_stats.rb b/elasticsearch-api/lib/elasticsearch/api/actions/indices/data_streams_stats.rb index 390865889c..cfef8e08f8 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/indices/data_streams_stats.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/indices/data_streams_stats.rb @@ -26,10 +26,10 @@ module Actions # Get statistics for one or more data streams. # # @option arguments [String] :name Comma-separated list of data streams used to limit the request. - # Wildcard expressions (+*+) are supported. - # To target all data streams in a cluster, omit this parameter or use +*+. + # Wildcard expressions (`*`) are supported. + # To target all data streams in a cluster, omit this parameter or use `*`. # @option arguments [String, Array] :expand_wildcards Type of data stream that wildcard patterns can match. - # Supports comma-separated values, such as +open,hidden+. Server default: open. + # Supports comma-separated values, such as `open,hidden`. Server default: open. # @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-data-streams-stats-1 diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/indices/delete.rb b/elasticsearch-api/lib/elasticsearch/api/actions/indices/delete.rb index f896e8e0d5..ae1462eed7 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/indices/delete.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/indices/delete.rb @@ -31,15 +31,15 @@ module Actions # # @option arguments [String, Array] :index Comma-separated list of indices to delete. # You cannot specify index aliases. - # By default, this parameter does not support wildcards (+*+) or +_all+. 
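A sketch of the create index call described above, for illustration only; the index name, mapping, and the `wait_for_active_shards: 'all'` choice are assumptions.

```
require 'elasticsearch'

client = Elasticsearch::Client.new(url: 'http://localhost:9200') # hypothetical local cluster

# Create an index and wait for every shard copy (primary and replica) to start
# before the call returns.
response = client.indices.create(
  index: 'my-index-000001',
  body: {
    settings: { number_of_shards: 1, number_of_replicas: 1 },
    mappings: { properties: { '@timestamp' => { type: 'date' } } }
  },
  wait_for_active_shards: 'all'
)
puts response['acknowledged'], response['shards_acknowledged']
```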
- # To use wildcards or +_all+, set the +action.destructive_requires_name+ cluster setting to +false+. (*Required*) - # @option arguments [Boolean] :allow_no_indices If +false+, the request returns an error if any wildcard expression, index alias, or +_all+ value targets only missing or closed indices. + # By default, this parameter does not support wildcards (`*`) or `_all`. + # To use wildcards or `_all`, set the `action.destructive_requires_name` cluster setting to `false`. (*Required*) + # @option arguments [Boolean] :allow_no_indices If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. # This behavior applies even if the request targets other open indices. Server default: true. # @option arguments [String, Array] :expand_wildcards Type of index that wildcard patterns can match. # If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. - # Supports comma-separated values, such as +open,hidden+. - # Valid values are: +all+, +open+, +closed+, +hidden+, +none+. Server default: open. - # @option arguments [Boolean] :ignore_unavailable If +false+, the request returns an error if it targets a missing or closed index. + # Supports comma-separated values, such as `open,hidden`. + # Valid values are: `all`, `open`, `closed`, `hidden`, `none`. Server default: open. + # @option arguments [Boolean] :ignore_unavailable If `false`, the request returns an error if it targets a missing or closed index. # @option arguments [Time] :master_timeout Period to wait for a connection to the master node. # If no response is received before the timeout expires, the request fails and returns an error. Server default: 30s. # @option arguments [Time] :timeout Period to wait for a response. diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/indices/delete_alias.rb b/elasticsearch-api/lib/elasticsearch/api/actions/indices/delete_alias.rb index 4e19211bfc..b81d6fbc0f 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/indices/delete_alias.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/indices/delete_alias.rb @@ -26,9 +26,9 @@ module Actions # Removes a data stream or index from an alias. # # @option arguments [String, Array] :index Comma-separated list of data streams or indices used to limit the request. - # Supports wildcards (+*+). (*Required*) + # Supports wildcards (`*`). (*Required*) # @option arguments [String, Array] :name Comma-separated list of aliases to remove. - # Supports wildcards (+*+). To remove all aliases, use +*+ or +_all+. (*Required*) + # Supports wildcards (`*`). To remove all aliases, use `*` or `_all`. (*Required*) # @option arguments [Time] :master_timeout Period to wait for a connection to the master node. # If no response is received before the timeout expires, the request fails and returns an error. Server default: 30s. # @option arguments [Time] :timeout Period to wait for a response. diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/indices/delete_data_lifecycle.rb b/elasticsearch-api/lib/elasticsearch/api/actions/indices/delete_data_lifecycle.rb index 80154e61dd..d0d6ed20e1 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/indices/delete_data_lifecycle.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/indices/delete_data_lifecycle.rb @@ -25,7 +25,7 @@ module Actions # Delete data stream lifecycles. 
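The delete index parameters above can be exercised as follows; this is a hedged sketch with hypothetical index names, not part of the patch, and it deliberately avoids wildcards because they are rejected by default.

```
require 'elasticsearch'

client = Elasticsearch::Client.new(url: 'http://localhost:9200') # hypothetical local cluster

# Delete two explicitly named indices; a missing index is ignored rather than
# causing an error.
response = client.indices.delete(
  index: 'my-index-000001,my-index-000002',
  ignore_unavailable: true
)
puts response['acknowledged']
```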
# Removes the data stream lifecycle from a data stream, rendering it not managed by the data stream lifecycle. # - # @option arguments [String, Array] :name A comma-separated list of data streams of which the data stream lifecycle will be deleted; use +*+ to get all data streams (*Required*) + # @option arguments [String, Array] :name A comma-separated list of data streams of which the data stream lifecycle will be deleted; use `*` to get all data streams (*Required*) # @option arguments [String, Array] :expand_wildcards Whether wildcard expressions should get expanded to open or closed indices (default: open) # @option arguments [Time] :master_timeout Specify timeout for connection to master # @option arguments [Time] :timeout Explicit timestamp for the document diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/indices/delete_data_stream.rb b/elasticsearch-api/lib/elasticsearch/api/actions/indices/delete_data_stream.rb index b99a604d4d..8a607d9138 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/indices/delete_data_stream.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/indices/delete_data_stream.rb @@ -25,9 +25,9 @@ module Actions # Delete data streams. # Deletes one or more data streams and their backing indices. # - # @option arguments [String, Array] :name Comma-separated list of data streams to delete. Wildcard (+*+) expressions are supported. (*Required*) + # @option arguments [String, Array] :name Comma-separated list of data streams to delete. Wildcard (`*`) expressions are supported. (*Required*) # @option arguments [Time] :master_timeout Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. Server default: 30s. - # @option arguments [String, Array] :expand_wildcards Type of data stream that wildcard patterns can match. Supports comma-separated values,such as +open,hidden+. Server default: open. + # @option arguments [String, Array] :expand_wildcards Type of data stream that wildcard patterns can match. Supports comma-separated values, such as `open,hidden`. Server default: open. # @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-data-stream diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/indices/delete_template.rb b/elasticsearch-api/lib/elasticsearch/api/actions/indices/delete_template.rb index 5619eb22c3..837db0515b 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/indices/delete_template.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/indices/delete_template.rb @@ -25,7 +25,7 @@ module Actions # Delete a legacy index template. # # @option arguments [String] :name The name of the legacy index template to delete. - # Wildcard (+*+) expressions are supported. (*Required*) + # Wildcard (`*`) expressions are supported. (*Required*) # @option arguments [Time] :master_timeout Period to wait for a connection to the master node. # If no response is received before the timeout expires, the request fails and returns an error. Server default: 30s. # @option arguments [Time] :timeout Period to wait for a response.
diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/indices/disk_usage.rb b/elasticsearch-api/lib/elasticsearch/api/actions/indices/disk_usage.rb index 27df7026e7..368a0d5e44 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/indices/disk_usage.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/indices/disk_usage.rb @@ -26,9 +26,9 @@ module Actions # Analyze the disk usage of each field of an index or data stream. # This API might not support indices created in previous Elasticsearch versions. # The result of a small index can be inaccurate as some parts of an index might not be analyzed by the API. - # NOTE: The total size of fields of the analyzed shards of the index in the response is usually smaller than the index +store_size+ value because some small metadata files are ignored and some parts of data files might not be scanned by the API. + # NOTE: The total size of fields of the analyzed shards of the index in the response is usually smaller than the index `store_size` value because some small metadata files are ignored and some parts of data files might not be scanned by the API. # Since stored fields are stored together in a compressed format, the sizes of stored fields are also estimates and can be inaccurate. - # The stored size of the +_id+ field is likely underestimated while the +_source+ field is overestimated. + # The stored size of the `_id` field is likely underestimated while the `_source` field is overestimated. # This functionality is Experimental and may be changed or removed # completely in a future release. Elastic will take a best effort approach # to fix any issues, but experimental features are not subject to the @@ -36,17 +36,17 @@ module Actions # # @option arguments [String, Array] :index Comma-separated list of data streams, indices, and aliases used to limit the request. # It’s recommended to execute this API with a single index (or the latest backing index of a data stream) as the API consumes resources significantly. (*Required*) - # @option arguments [Boolean] :allow_no_indices If false, the request returns an error if any wildcard expression, index alias, or +_all+ value targets only missing or closed indices. + # @option arguments [Boolean] :allow_no_indices If false, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. # This behavior applies even if the request targets other open indices. - # For example, a request targeting +foo*,bar*+ returns an error if an index starts with +foo+ but no index starts with +bar+. Server default: true. + # For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. Server default: true. # @option arguments [String, Array] :expand_wildcards Type of index that wildcard patterns can match. # If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. - # Supports comma-separated values, such as +open,hidden+. Server default: open. - # @option arguments [Boolean] :flush If +true+, the API performs a flush before analysis. - # If +false+, the response may not include uncommitted data. Server default: true. - # @option arguments [Boolean] :ignore_unavailable If +true+, missing or closed indices are not included in the response. + # Supports comma-separated values, such as `open,hidden`. Server default: open. + # @option arguments [Boolean] :flush If `true`, the API performs a flush before analysis. 
+ # If `false`, the response may not include uncommitted data. Server default: true. + # @option arguments [Boolean] :ignore_unavailable If `true`, missing or closed indices are not included in the response. # @option arguments [Boolean] :run_expensive_tasks Analyzing field disk usage is resource-intensive. - # To use the API, this parameter must be set to +true+. + # To use the API, this parameter must be set to `true`. # @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-disk-usage diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/indices/downsample.rb b/elasticsearch-api/lib/elasticsearch/api/actions/indices/downsample.rb index 4a33d4f837..e7d156d4c4 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/indices/downsample.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/indices/downsample.rb @@ -23,12 +23,12 @@ module API module Indices module Actions # Downsample an index. - # Aggregate a time series (TSDS) index and store pre-computed statistical summaries (+min+, +max+, +sum+, +value_count+ and +avg+) for each metric field grouped by a configured time interval. + # Aggregate a time series (TSDS) index and store pre-computed statistical summaries (`min`, `max`, `sum`, `value_count` and `avg`) for each metric field grouped by a configured time interval. # For example, a TSDS index that contains metrics sampled every 10 seconds can be downsampled to an hourly index. # All documents within an hour interval are summarized and stored as a single document in the downsample index. # NOTE: Only indices in a time series data stream are supported. # Neither field nor document level security can be defined on the source index. - # The source index must be read only (+index.blocks.write: true+). + # The source index must be read only (`index.blocks.write: true`). # This functionality is Experimental and may be changed or removed # completely in a future release. Elastic will take a best effort approach # to fix any issues, but experimental features are not subject to the diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/indices/exists.rb b/elasticsearch-api/lib/elasticsearch/api/actions/indices/exists.rb index e9396806df..ac2c82a679 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/indices/exists.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/indices/exists.rb @@ -25,17 +25,17 @@ module Actions # Check indices. # Check if one or more indices, index aliases, or data streams exist. # - # @option arguments [String, Array] :index Comma-separated list of data streams, indices, and aliases. Supports wildcards (+*+). (*Required*) - # @option arguments [Boolean] :allow_no_indices If +false+, the request returns an error if any wildcard expression, index alias, or +_all+ value targets only missing or closed indices. + # @option arguments [String, Array] :index Comma-separated list of data streams, indices, and aliases. Supports wildcards (`*`). (*Required*) + # @option arguments [Boolean] :allow_no_indices If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. # This behavior applies even if the request targets other open indices. Server default: true. # @option arguments [String, Array] :expand_wildcards Type of index that wildcard patterns can match. # If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. 
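To illustrate the disk usage analysis described above (an experimental API), a minimal sketch with a hypothetical index name; `run_expensive_tasks: true` is required, as documented.

```
require 'elasticsearch'

client = Elasticsearch::Client.new(url: 'http://localhost:9200') # hypothetical local cluster

# Analyze per-field disk usage for a single index; the API refuses to run
# unless run_expensive_tasks is explicitly set to true.
response = client.indices.disk_usage(
  index: 'my-index-000001',
  run_expensive_tasks: true,
  flush: true
)
puts response.body
```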
- # Supports comma-separated values, such as +open,hidden+. - # Valid values are: +all+, +open+, +closed+, +hidden+, +none+. Server default: open. - # @option arguments [Boolean] :flat_settings If +true+, returns settings in flat format. - # @option arguments [Boolean] :ignore_unavailable If +false+, the request returns an error if it targets a missing or closed index. - # @option arguments [Boolean] :include_defaults If +true+, return all default settings in the response. - # @option arguments [Boolean] :local If +true+, the request retrieves information from the local node only. + # Supports comma-separated values, such as `open,hidden`. + # Valid values are: `all`, `open`, `closed`, `hidden`, `none`. Server default: open. + # @option arguments [Boolean] :flat_settings If `true`, returns settings in flat format. + # @option arguments [Boolean] :ignore_unavailable If `false`, the request returns an error if it targets a missing or closed index. + # @option arguments [Boolean] :include_defaults If `true`, return all default settings in the response. + # @option arguments [Boolean] :local If `true`, the request retrieves information from the local node only. # @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-exists diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/indices/exists_alias.rb b/elasticsearch-api/lib/elasticsearch/api/actions/indices/exists_alias.rb index b80dffa533..db5fd43e6c 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/indices/exists_alias.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/indices/exists_alias.rb @@ -25,16 +25,16 @@ module Actions # Check aliases. # Check if one or more data stream or index aliases exist. # - # @option arguments [String, Array] :name Comma-separated list of aliases to check. Supports wildcards (+*+). (*Required*) - # @option arguments [String, Array] :index Comma-separated list of data streams or indices used to limit the request. Supports wildcards (+*+). - # To target all data streams and indices, omit this parameter or use +*+ or +_all+. - # @option arguments [Boolean] :allow_no_indices If +false+, the request returns an error if any wildcard expression, index alias, or +_all+ value targets only missing or closed indices. + # @option arguments [String, Array] :name Comma-separated list of aliases to check. Supports wildcards (`*`). (*Required*) + # @option arguments [String, Array] :index Comma-separated list of data streams or indices used to limit the request. Supports wildcards (`*`). + # To target all data streams and indices, omit this parameter or use `*` or `_all`. + # @option arguments [Boolean] :allow_no_indices If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. # This behavior applies even if the request targets other open indices. Server default: true. # @option arguments [String, Array] :expand_wildcards Type of index that wildcard patterns can match. # If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. - # Supports comma-separated values, such as +open,hidden+. - # Valid values are: +all+, +open+, +closed+, +hidden+, +none+. Server default: open. - # @option arguments [Boolean] :ignore_unavailable If +false+, requests that include a missing data stream or index in the target indices or data streams return an error. 
+ # Supports comma-separated values, such as `open,hidden`. + # Valid values are: `all`, `open`, `closed`, `hidden`, `none`. Server default: open. + # @option arguments [Boolean] :ignore_unavailable If `false`, requests that include a missing data stream or index in the target indices or data streams return an error. # @option arguments [Time] :master_timeout Period to wait for a connection to the master node. # If no response is received before the timeout expires, the request fails and returns an error. Server default: 30s. # @option arguments [Hash] :headers Custom HTTP headers diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/indices/exists_template.rb b/elasticsearch-api/lib/elasticsearch/api/actions/indices/exists_template.rb index 2c755ef89e..0248e2fa17 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/indices/exists_template.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/indices/exists_template.rb @@ -28,12 +28,12 @@ module Actions # IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8. # # @option arguments [String, Array] :name A comma-separated list of index template names used to limit the request. - # Wildcard (+*+) expressions are supported. (*Required*) + # Wildcard (`*`) expressions are supported. (*Required*) # @option arguments [Boolean] :flat_settings Indicates whether to use a flat format for the response. # @option arguments [Boolean] :local Indicates whether to get information from the local node only. # @option arguments [Time] :master_timeout The period to wait for the master node. # If the master node is not available before the timeout expires, the request fails and returns an error. - # To indicate that the request should never timeout, set it to +-1+. Server default: 30s. + # To indicate that the request should never timeout, set it to `-1`. Server default: 30s. # @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-exists-template diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/indices/field_usage_stats.rb b/elasticsearch-api/lib/elasticsearch/api/actions/indices/field_usage_stats.rb index 8c9285caa9..a3c3b6e606 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/indices/field_usage_stats.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/indices/field_usage_stats.rb @@ -34,13 +34,13 @@ module Actions # support SLA of official GA features. # # @option arguments [String, Array] :index Comma-separated list or wildcard expression of index names used to limit the request. (*Required*) - # @option arguments [Boolean] :allow_no_indices If +false+, the request returns an error if any wildcard expression, index alias, or +_all+ value targets only missing or closed indices. + # @option arguments [Boolean] :allow_no_indices If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. # This behavior applies even if the request targets other open indices. - # For example, a request targeting +foo*,bar*+ returns an error if an index starts with +foo+ but no index starts with +bar+. + # For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. # @option arguments [String, Array] :expand_wildcards Type of index that wildcard patterns can match. 
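The existence checks covered above can be called as below; this is only a sketch, and the index and alias names are assumptions.

```
require 'elasticsearch'

client = Elasticsearch::Client.new(url: 'http://localhost:9200') # hypothetical local cluster

# HEAD-style presence checks against a hypothetical index and alias.
puts client.indices.exists(index: 'my-index').inspect
puts client.indices.exists_alias(name: 'my-alias', index: 'my-index').inspect
```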
# If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. - # Supports comma-separated values, such as +open,hidden+. - # @option arguments [Boolean] :ignore_unavailable If +true+, missing or closed indices are not included in the response. + # Supports comma-separated values, such as `open,hidden`. + # @option arguments [Boolean] :ignore_unavailable If `true`, missing or closed indices are not included in the response. # @option arguments [String, Array] :fields Comma-separated list or wildcard expressions of fields to include in the statistics. # @option arguments [Hash] :headers Custom HTTP headers # diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/indices/flush.rb b/elasticsearch-api/lib/elasticsearch/api/actions/indices/flush.rb index cd4e96d82e..d8f53c31bc 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/indices/flush.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/indices/flush.rb @@ -33,18 +33,18 @@ module Actions # If you call the flush API after indexing some documents then a successful response indicates that Elasticsearch has flushed all the documents that were indexed before the flush API was called. # # @option arguments [String, Array] :index Comma-separated list of data streams, indices, and aliases to flush. - # Supports wildcards (+*+). - # To flush all data streams and indices, omit this parameter or use +*+ or +_all+. - # @option arguments [Boolean] :allow_no_indices If +false+, the request returns an error if any wildcard expression, index alias, or +_all+ value targets only missing or closed indices. + # Supports wildcards (`*`). + # To flush all data streams and indices, omit this parameter or use `*` or `_all`. + # @option arguments [Boolean] :allow_no_indices If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. # This behavior applies even if the request targets other open indices. Server default: true. # @option arguments [String, Array] :expand_wildcards Type of index that wildcard patterns can match. # If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. - # Supports comma-separated values, such as +open,hidden+. - # Valid values are: +all+, +open+, +closed+, +hidden+, +none+. Server default: open. - # @option arguments [Boolean] :force If +true+, the request forces a flush even if there are no changes to commit to the index. Server default: true. - # @option arguments [Boolean] :ignore_unavailable If +false+, the request returns an error if it targets a missing or closed index. - # @option arguments [Boolean] :wait_if_ongoing If +true+, the flush operation blocks until execution when another flush operation is running. - # If +false+, Elasticsearch returns an error if you request a flush when another flush operation is running. Server default: true. + # Supports comma-separated values, such as `open,hidden`. + # Valid values are: `all`, `open`, `closed`, `hidden`, `none`. Server default: open. + # @option arguments [Boolean] :force If `true`, the request forces a flush even if there are no changes to commit to the index. Server default: true. + # @option arguments [Boolean] :ignore_unavailable If `false`, the request returns an error if it targets a missing or closed index. + # @option arguments [Boolean] :wait_if_ongoing If `true`, the flush operation blocks until execution when another flush operation is running. 
+ # If `false`, Elasticsearch returns an error if you request a flush when another flush operation is running. Server default: true. # @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-flush diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/indices/forcemerge.rb b/elasticsearch-api/lib/elasticsearch/api/actions/indices/forcemerge.rb index 7a30c2d2e0..d60d1ee08e 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/indices/forcemerge.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/indices/forcemerge.rb @@ -34,13 +34,13 @@ module Actions # So the number of soft-deleted documents can then grow rapidly, resulting in higher disk usage and worse search performance. # If you regularly force merge an index receiving writes, this can also make snapshots more expensive, since the new documents can't be backed up incrementally. # **Blocks during a force merge** - # Calls to this API block until the merge is complete (unless request contains +wait_for_completion=false+). + # Calls to this API block until the merge is complete (unless request contains `wait_for_completion=false`). # If the client connection is lost before completion then the force merge process will continue in the background. # Any new requests to force merge the same indices will also block until the ongoing force merge is complete. # **Running force merge asynchronously** - # If the request contains +wait_for_completion=false+, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to get the status of the task. + # If the request contains `wait_for_completion=false`, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to get the status of the task. # However, you can not cancel this task as the force merge task is not cancelable. - # Elasticsearch creates a record of this task as a document at +_tasks/+. + # Elasticsearch creates a record of this task as a document at `_tasks/`. # When you are done with a task, you should delete the task document so Elasticsearch can reclaim the space. # **Force merging multiple indices** # You can force merge multiple indices with a single request by targeting: @@ -49,21 +49,22 @@ module Actions # * One or more aliases # * All data streams and indices in a cluster # Each targeted shard is force-merged separately using the force_merge threadpool. - # By default each node only has a single +force_merge+ thread which means that the shards on that node are force-merged one at a time. - # If you expand the +force_merge+ threadpool on a node then it will force merge its shards in parallel - # Force merge makes the storage for the shard being merged temporarily increase, as it may require free space up to triple its size in case +max_num_segments parameter+ is set to +1+, to rewrite all segments into a new one. + # By default each node only has a single `force_merge` thread which means that the shards on that node are force-merged one at a time. + # If you expand the `force_merge` threadpool on a node then it will force merge its shards in parallel. + # Force merge makes the storage for the shard being merged temporarily increase, as it may require free space up to triple its size in case the `max_num_segments` parameter is set to `1`, to rewrite all segments into a new one.
# **Data streams and time-based indices** # Force-merging is useful for managing a data stream's older backing indices and other time-based indices, particularly after a rollover. # In these cases, each index only receives indexing traffic for a certain period of time. # Once an index receive no more writes, its shards can be force-merged to a single segment. # This can be a good idea because single-segment shards can sometimes use simpler and more efficient data structures to perform searches. # For example: - # + + # + # ``` # POST /.ds-my-data-stream-2099.03.07-000001/_forcemerge?max_num_segments=1 - # + + # ``` # - # @option arguments [String, Array] :index A comma-separated list of index names; use +_all+ or empty string to perform the operation on all indices - # @option arguments [Boolean] :allow_no_indices Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes +_all+ string or when no indices have been specified) + # @option arguments [String, Array] :index A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices + # @option arguments [Boolean] :allow_no_indices Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) # @option arguments [String, Array] :expand_wildcards Whether to expand wildcard expression to concrete indices that are open, closed or both. # @option arguments [Boolean] :flush Specify whether the index should be flushed after performing the operation (default: true) # @option arguments [Boolean] :ignore_unavailable Whether specified concrete indices should be ignored when unavailable (missing or closed) diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/indices/get_alias.rb b/elasticsearch-api/lib/elasticsearch/api/actions/indices/get_alias.rb index 7f54b919bb..b14f665bf8 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/indices/get_alias.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/indices/get_alias.rb @@ -26,18 +26,18 @@ module Actions # Retrieves information for one or more data stream or index aliases. # # @option arguments [String, Array] :name Comma-separated list of aliases to retrieve. - # Supports wildcards (+*+). - # To retrieve all aliases, omit this parameter or use +*+ or +_all+. + # Supports wildcards (`*`). + # To retrieve all aliases, omit this parameter or use `*` or `_all`. # @option arguments [String, Array] :index Comma-separated list of data streams or indices used to limit the request. - # Supports wildcards (+*+). - # To target all data streams and indices, omit this parameter or use +*+ or +_all+. - # @option arguments [Boolean] :allow_no_indices If +false+, the request returns an error if any wildcard expression, index alias, or +_all+ value targets only missing or closed indices. + # Supports wildcards (`*`). + # To target all data streams and indices, omit this parameter or use `*` or `_all`. + # @option arguments [Boolean] :allow_no_indices If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. # This behavior applies even if the request targets other open indices. Server default: true. # @option arguments [String, Array] :expand_wildcards Type of index that wildcard patterns can match. # If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. 
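The same force merge request shown in the doc comment above can be issued from the Ruby client; this sketch reuses the backing index name from that example, with the client URL assumed.

```
require 'elasticsearch'

client = Elasticsearch::Client.new(url: 'http://localhost:9200') # hypothetical local cluster

# Merge an older backing index (no longer receiving writes) down to a single
# segment; wait_for_completion: false returns a task reference instead of blocking.
response = client.indices.forcemerge(
  index: '.ds-my-data-stream-2099.03.07-000001',
  max_num_segments: 1,
  wait_for_completion: false
)
puts response['task']
```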
- # Supports comma-separated values, such as +open,hidden+. - # Valid values are: +all+, +open+, +closed+, +hidden+, +none+. Server default: open. - # @option arguments [Boolean] :ignore_unavailable If +false+, the request returns an error if it targets a missing or closed index. + # Supports comma-separated values, such as `open,hidden`. + # Valid values are: `all`, `open`, `closed`, `hidden`, `none`. Server default: open. + # @option arguments [Boolean] :ignore_unavailable If `false`, the request returns an error if it targets a missing or closed index. # @option arguments [Time] :master_timeout Period to wait for a connection to the master node. # If no response is received before the timeout expires, the request fails and returns an error. Server default: 30s. # @option arguments [Hash] :headers Custom HTTP headers diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/indices/get_data_lifecycle.rb b/elasticsearch-api/lib/elasticsearch/api/actions/indices/get_data_lifecycle.rb index d4e33ecb01..fc192ec59e 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/indices/get_data_lifecycle.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/indices/get_data_lifecycle.rb @@ -26,12 +26,12 @@ module Actions # Get the data stream lifecycle configuration of one or more data streams. # # @option arguments [String, Array] :name Comma-separated list of data streams to limit the request. - # Supports wildcards (+*+). - # To target all data streams, omit this parameter or use +*+ or +_all+. (*Required*) + # Supports wildcards (`*`). + # To target all data streams, omit this parameter or use `*` or `_all`. (*Required*) # @option arguments [String, Array] :expand_wildcards Type of data stream that wildcard patterns can match. - # Supports comma-separated values, such as +open,hidden+. - # Valid values are: +all+, +open+, +closed+, +hidden+, +none+. Server default: open. - # @option arguments [Boolean] :include_defaults If +true+, return all default settings in the response. + # Supports comma-separated values, such as `open,hidden`. + # Valid values are: `all`, `open`, `closed`, `hidden`, `none`. Server default: open. + # @option arguments [Boolean] :include_defaults If `true`, return all default settings in the response. # @option arguments [Time] :master_timeout Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. Server default: 30s. # @option arguments [Hash] :headers Custom HTTP headers # diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/indices/get_data_stream.rb b/elasticsearch-api/lib/elasticsearch/api/actions/indices/get_data_stream.rb index 3e35d02f68..538398f652 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/indices/get_data_stream.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/indices/get_data_stream.rb @@ -26,9 +26,9 @@ module Actions # Get information about one or more data streams. # # @option arguments [String, Array] :name Comma-separated list of data stream names used to limit the request. - # Wildcard (+*+) expressions are supported. If omitted, all data streams are returned. + # Wildcard (`*`) expressions are supported. If omitted, all data streams are returned. # @option arguments [String, Array] :expand_wildcards Type of data stream that wildcard patterns can match. - # Supports comma-separated values, such as +open,hidden+. Server default: open. + # Supports comma-separated values, such as `open,hidden`. Server default: open. 
# @option arguments [Boolean] :include_defaults If true, returns all relevant default configurations for the index template. # @option arguments [Time] :master_timeout Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. Server default: 30s. # @option arguments [Boolean] :verbose Whether the maximum timestamp for each data stream should be calculated and returned. diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/indices/get_field_mapping.rb b/elasticsearch-api/lib/elasticsearch/api/actions/indices/get_field_mapping.rb index c16b6cd2c0..ebaae20d04 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/indices/get_field_mapping.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/indices/get_field_mapping.rb @@ -28,18 +28,18 @@ module Actions # This API is useful if you don't need a complete mapping or if an index mapping contains a large number of fields. # # @option arguments [String, Array] :fields Comma-separated list or wildcard expression of fields used to limit returned information. - # Supports wildcards (+*+). (*Required*) + # Supports wildcards (`*`). (*Required*) # @option arguments [String, Array] :index Comma-separated list of data streams, indices, and aliases used to limit the request. - # Supports wildcards (+*+). - # To target all data streams and indices, omit this parameter or use +*+ or +_all+. - # @option arguments [Boolean] :allow_no_indices If +false+, the request returns an error if any wildcard expression, index alias, or +_all+ value targets only missing or closed indices. + # Supports wildcards (`*`). + # To target all data streams and indices, omit this parameter or use `*` or `_all`. + # @option arguments [Boolean] :allow_no_indices If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. # This behavior applies even if the request targets other open indices. Server default: true. # @option arguments [String, Array] :expand_wildcards Type of index that wildcard patterns can match. # If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. - # Supports comma-separated values, such as +open,hidden+. - # Valid values are: +all+, +open+, +closed+, +hidden+, +none+. Server default: open. - # @option arguments [Boolean] :ignore_unavailable If +false+, the request returns an error if it targets a missing or closed index. - # @option arguments [Boolean] :include_defaults If +true+, return all default settings in the response. + # Supports comma-separated values, such as `open,hidden`. + # Valid values are: `all`, `open`, `closed`, `hidden`, `none`. Server default: open. + # @option arguments [Boolean] :ignore_unavailable If `false`, the request returns an error if it targets a missing or closed index. + # @option arguments [Boolean] :include_defaults If `true`, return all default settings in the response. 
# @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-mapping diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/indices/get_mapping.rb b/elasticsearch-api/lib/elasticsearch/api/actions/indices/get_mapping.rb index 067e4e7a5c..cb7d3b7a35 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/indices/get_mapping.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/indices/get_mapping.rb @@ -26,16 +26,16 @@ module Actions # For data streams, the API retrieves mappings for the stream’s backing indices. # # @option arguments [String, Array] :index Comma-separated list of data streams, indices, and aliases used to limit the request. - # Supports wildcards (+*+). - # To target all data streams and indices, omit this parameter or use +*+ or +_all+. - # @option arguments [Boolean] :allow_no_indices If +false+, the request returns an error if any wildcard expression, index alias, or +_all+ value targets only missing or closed indices. + # Supports wildcards (`*`). + # To target all data streams and indices, omit this parameter or use `*` or `_all`. + # @option arguments [Boolean] :allow_no_indices If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. # This behavior applies even if the request targets other open indices. Server default: true. # @option arguments [String, Array] :expand_wildcards Type of index that wildcard patterns can match. # If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. - # Supports comma-separated values, such as +open,hidden+. - # Valid values are: +all+, +open+, +closed+, +hidden+, +none+. Server default: open. - # @option arguments [Boolean] :ignore_unavailable If +false+, the request returns an error if it targets a missing or closed index. - # @option arguments [Boolean] :local If +true+, the request retrieves information from the local node only. + # Supports comma-separated values, such as `open,hidden`. + # Valid values are: `all`, `open`, `closed`, `hidden`, `none`. Server default: open. + # @option arguments [Boolean] :ignore_unavailable If `false`, the request returns an error if it targets a missing or closed index. + # @option arguments [Boolean] :local If `true`, the request retrieves information from the local node only. # @option arguments [Time] :master_timeout Period to wait for a connection to the master node. # If no response is received before the timeout expires, the request fails and returns an error. Server default: 30s. # @option arguments [Hash] :headers Custom HTTP headers diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/indices/get_settings.rb b/elasticsearch-api/lib/elasticsearch/api/actions/indices/get_settings.rb index 2d8dc4dc06..6441a04db4 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/indices/get_settings.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/indices/get_settings.rb @@ -27,22 +27,22 @@ module Actions # For data streams, it returns setting information for the stream's backing indices. # # @option arguments [String, Array] :index Comma-separated list of data streams, indices, and aliases used to limit - # the request. Supports wildcards (+*+). To target all data streams and - # indices, omit this parameter or use +*+ or +_all+. + # the request. Supports wildcards (`*`). 
To target all data streams and + # indices, omit this parameter or use `*` or `_all`. # @option arguments [String, Array] :name Comma-separated list or wildcard expression of settings to retrieve. - # @option arguments [Boolean] :allow_no_indices If +false+, the request returns an error if any wildcard expression, index - # alias, or +_all+ value targets only missing or closed indices. This + # @option arguments [Boolean] :allow_no_indices If `false`, the request returns an error if any wildcard expression, index + # alias, or `_all` value targets only missing or closed indices. This # behavior applies even if the request targets other open indices. For - # example, a request targeting +foo*,bar*+ returns an error if an index - # starts with foo but no index starts with +bar+. Server default: true. + # example, a request targeting `foo*,bar*` returns an error if an index + # starts with `foo` but no index starts with `bar`. Server default: true. # @option arguments [String, Array] :expand_wildcards Type of index that wildcard patterns can match. # If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. - # Supports comma-separated values, such as +open,hidden+. Server default: open. - # @option arguments [Boolean] :flat_settings If +true+, returns settings in flat format. - # @option arguments [Boolean] :ignore_unavailable If +false+, the request returns an error if it targets a missing or closed index. - # @option arguments [Boolean] :include_defaults If +true+, return all default settings in the response. - # @option arguments [Boolean] :local If +true+, the request retrieves information from the local node only. If - # +false+, information is retrieved from the master node. + # Supports comma-separated values, such as `open,hidden`. Server default: open. + # @option arguments [Boolean] :flat_settings If `true`, returns settings in flat format. + # @option arguments [Boolean] :ignore_unavailable If `false`, the request returns an error if it targets a missing or closed index. + # @option arguments [Boolean] :include_defaults If `true`, return all default settings in the response. + # @option arguments [Boolean] :local If `true`, the request retrieves information from the local node only. If + # `false`, information is retrieved from the master node. # @option arguments [Time] :master_timeout Period to wait for a connection to the master node. If no response is # received before the timeout expires, the request fails and returns an # error. Server default: 30s. diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/indices/get_template.rb b/elasticsearch-api/lib/elasticsearch/api/actions/indices/get_template.rb index 2f8e1b7319..d40137e6f4 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/indices/get_template.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/indices/get_template.rb @@ -27,10 +27,10 @@ module Actions # IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8. # # @option arguments [String, Array] :name Comma-separated list of index template names used to limit the request. - # Wildcard (+*+) expressions are supported. - # To return all index templates, omit this parameter or use a value of +_all+ or +*+. - # @option arguments [Boolean] :flat_settings If +true+, returns settings in flat format.
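For the get settings parameters documented above, a hedged sketch (not part of the patch) that fetches a single setting for a hypothetical index; the setting name and index name are assumptions.

```
require 'elasticsearch'

client = Elasticsearch::Client.new(url: 'http://localhost:9200') # hypothetical local cluster

# Fetch one setting in flat form, including its default value when it has not
# been set explicitly on the index.
response = client.indices.get_settings(
  index: 'my-index-000001',
  name: 'index.refresh_interval',
  flat_settings: true,
  include_defaults: true
)
puts response.body
```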
- # @option arguments [Boolean] :local If +true+, the request retrieves information from the local node only. + # Wildcard (`*`) expressions are supported. + # To return all index templates, omit this parameter or use a value of `_all` or `*`. + # @option arguments [Boolean] :flat_settings If `true`, returns settings in flat format. + # @option arguments [Boolean] :local If `true`, the request retrieves information from the local node only. # @option arguments [Time] :master_timeout Period to wait for a connection to the master node. # If no response is received before the timeout expires, the request fails and returns an error. Server default: 30s. # @option arguments [Hash] :headers Custom HTTP headers diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/indices/migrate_to_data_stream.rb b/elasticsearch-api/lib/elasticsearch/api/actions/indices/migrate_to_data_stream.rb index eac8dc5102..93492ad3d7 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/indices/migrate_to_data_stream.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/indices/migrate_to_data_stream.rb @@ -27,7 +27,7 @@ module Actions # You must have a matching index template that is data stream enabled. # The alias must meet the following criteria: # The alias must have a write index; - # All indices for the alias must have a +@timestamp+ field mapping of a +date+ or +date_nanos+ field type; + # All indices for the alias must have a `@timestamp` field mapping of a `date` or `date_nanos` field type; # The alias must not have any filters; # The alias must not use custom routing. # If successful, the request removes the alias and creates a data stream with the same name. diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/indices/open.rb b/elasticsearch-api/lib/elasticsearch/api/actions/indices/open.rb index 477f5c1ff3..584bd00926 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/indices/open.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/indices/open.rb @@ -32,32 +32,32 @@ module Actions # The data of opened or closed indices is automatically replicated by the cluster to ensure that enough shard copies are safely kept around at all times. # You can open and close multiple indices. # An error is thrown if the request explicitly refers to a missing index. - # This behavior can be turned off by using the +ignore_unavailable=true+ parameter. + # This behavior can be turned off by using the `ignore_unavailable=true` parameter. # By default, you must explicitly name the indices you are opening or closing. - # To open or close indices with +_all+, +*+, or other wildcard expressions, change the +action.destructive_requires_name+ setting to +false+. + # To open or close indices with `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to `false`. # This setting can also be changed with the cluster update settings API. # Closed indices consume a significant amount of disk-space which can cause problems in managed environments. - # Closing indices can be turned off with the cluster settings API by setting +cluster.indices.close.enable+ to +false+. - # Because opening or closing an index allocates its shards, the +wait_for_active_shards+ setting on index creation applies to the +_open+ and +_close+ index actions as well. + # Closing indices can be turned off with the cluster settings API by setting `cluster.indices.close.enable` to `false`. 
+ # Because opening or closing an index allocates its shards, the `wait_for_active_shards` setting on index creation applies to the `_open` and `_close` index actions as well.
 #
 # @option arguments [String, Array] :index Comma-separated list of data streams, indices, and aliases used to limit the request.
- # Supports wildcards (+*+).
+ # Supports wildcards (`*`).
 # By default, you must explicitly name the indices you are using to limit the request.
- # To limit a request using +_all+, +*+, or other wildcard expressions, change the +action.destructive_requires_name+ setting to false.
- # You can update this setting in the +elasticsearch.yml+ file or using the cluster update settings API. (*Required*)
- # @option arguments [Boolean] :allow_no_indices If +false+, the request returns an error if any wildcard expression, index alias, or +_all+ value targets only missing or closed indices.
+ # To limit a request using `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to `false`.
+ # You can update this setting in the `elasticsearch.yml` file or using the cluster update settings API. (*Required*)
+ # @option arguments [Boolean] :allow_no_indices If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices.
 # This behavior applies even if the request targets other open indices. Server default: true.
 # @option arguments [String, Array] :expand_wildcards Type of index that wildcard patterns can match.
 # If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
- # Supports comma-separated values, such as +open,hidden+.
- # Valid values are: +all+, +open+, +closed+, +hidden+, +none+. Server default: open.
- # @option arguments [Boolean] :ignore_unavailable If +false+, the request returns an error if it targets a missing or closed index.
+ # Supports comma-separated values, such as `open,hidden`.
+ # Valid values are: `all`, `open`, `closed`, `hidden`, `none`. Server default: open.
+ # @option arguments [Boolean] :ignore_unavailable If `false`, the request returns an error if it targets a missing or closed index.
 # @option arguments [Time] :master_timeout Period to wait for a connection to the master node.
 # If no response is received before the timeout expires, the request fails and returns an error. Server default: 30s.
 # @option arguments [Time] :timeout Period to wait for a response.
 # If no response is received before the timeout expires, the request fails and returns an error. Server default: 30s.
 # @option arguments [Integer, String] :wait_for_active_shards The number of shard copies that must be active before proceeding with the operation.
- # Set to +all+ or any positive integer up to the total number of shards in the index (+number_of_replicas+1+). Server default: 1.
+ # Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). Server default: 1.
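For reviewers, a minimal sketch of how the open options documented above map onto a Ruby client call; the client setup and index name are illustrative assumptions, not part of this patch:

```
require 'elasticsearch'

# Assumes a cluster reachable at this URL; adjust for your environment.
client = Elasticsearch::Client.new(url: 'http://localhost:9200')

# Open a previously closed index, waiting for one active shard copy per shard
# and failing fast if the index is missing.
client.indices.open(
  index: 'my-index-000001',
  wait_for_active_shards: 1,
  ignore_unavailable: false
)
```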
# @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-open diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/indices/put_alias.rb b/elasticsearch-api/lib/elasticsearch/api/actions/indices/put_alias.rb index 6a97a06dea..57ea6e7491 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/indices/put_alias.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/indices/put_alias.rb @@ -26,7 +26,7 @@ module Actions # Adds a data stream or index to an alias. # # @option arguments [String, Array] :index Comma-separated list of data streams or indices to add. - # Supports wildcards (+*+). + # Supports wildcards (`*`). # Wildcard patterns that match both data streams and indices return an error. (*Required*) # @option arguments [String] :name Alias to update. # If the alias doesn’t exist, the request creates it. diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/indices/put_data_lifecycle.rb b/elasticsearch-api/lib/elasticsearch/api/actions/indices/put_data_lifecycle.rb index ef432677f4..310e0ddf09 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/indices/put_data_lifecycle.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/indices/put_data_lifecycle.rb @@ -26,11 +26,11 @@ module Actions # Update the data stream lifecycle of the specified data streams. # # @option arguments [String, Array] :name Comma-separated list of data streams used to limit the request. - # Supports wildcards (+*+). - # To target all data streams use +*+ or +_all+. (*Required*) + # Supports wildcards (`*`). + # To target all data streams use `*` or `_all`. (*Required*) # @option arguments [String, Array] :expand_wildcards Type of data stream that wildcard patterns can match. - # Supports comma-separated values, such as +open,hidden+. - # Valid values are: +all+, +hidden+, +open+, +closed+, +none+. Server default: open. + # Supports comma-separated values, such as `open,hidden`. + # Valid values are: `all`, `hidden`, `open`, `closed`, `none`. Server default: open. # @option arguments [Time] :master_timeout Period to wait for a connection to the master node. If no response is # received before the timeout expires, the request fails and returns an # error. Server default: 30s. diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/indices/put_index_template.rb b/elasticsearch-api/lib/elasticsearch/api/actions/indices/put_index_template.rb index 07324e2b01..954df550d2 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/indices/put_index_template.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/indices/put_index_template.rb @@ -29,23 +29,23 @@ module Actions # For data streams, these settings and mappings are applied when the stream's backing indices are created. # Settings and mappings specified in a create index API request override any settings or mappings specified in an index template. # Changes to index templates do not affect existing indices, including the existing backing indices of a data stream. - # You can use C-style +/* *\/+ block comments in index templates. + # You can use C-style `/* *\/` block comments in index templates. # You can include comments anywhere in the request body, except before the opening curly bracket. # **Multiple matching templates** # If multiple index templates match the name of a new index or data stream, the template with the highest priority is used. 
# Multiple templates with overlapping index patterns at the same priority are not allowed and an error will be thrown when attempting to create a template matching an existing index template at identical priorities. # **Composing aliases, mappings, and settings** - # When multiple component templates are specified in the +composed_of+ field for an index template, they are merged in the order specified, meaning that later component templates override earlier component templates. + # When multiple component templates are specified in the `composed_of` field for an index template, they are merged in the order specified, meaning that later component templates override earlier component templates. # Any mappings, settings, or aliases from the parent index template are merged in next. # Finally, any configuration on the index request itself is merged. # Mapping definitions are merged recursively, which means that later mapping components can introduce new field mappings and update the mapping configuration. # If a field mapping is already contained in an earlier component, its definition will be completely overwritten by the later one. - # This recursive merging strategy applies not only to field mappings, but also root options like +dynamic_templates+ and +meta+. - # If an earlier component contains a +dynamic_templates+ block, then by default new +dynamic_templates+ entries are appended onto the end. + # This recursive merging strategy applies not only to field mappings, but also root options like `dynamic_templates` and `meta`. + # If an earlier component contains a `dynamic_templates` block, then by default new `dynamic_templates` entries are appended onto the end. # If an entry already exists with the same key, then it is overwritten by the new definition. # # @option arguments [String] :name Index or template name (*Required*) - # @option arguments [Boolean] :create If +true+, this request cannot replace or update existing index templates. + # @option arguments [Boolean] :create If `true`, this request cannot replace or update existing index templates. # @option arguments [Time] :master_timeout Period to wait for a connection to the master node. # If no response is received before the timeout expires, the request fails and returns an error. Server default: 30s. # @option arguments [String] :cause User defined reason for creating/updating the index template diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/indices/put_mapping.rb b/elasticsearch-api/lib/elasticsearch/api/actions/indices/put_mapping.rb index 339b61f807..07f3cfa3bc 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/indices/put_mapping.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/indices/put_mapping.rb @@ -33,7 +33,7 @@ module Actions # You can populate the new multi-field with the update by query API. # **Change supported mapping parameters for an existing field** # The documentation for each mapping parameter indicates whether you can update it for an existing field using this API. - # For example, you can use the update mapping API to update the +ignore_above+ parameter. + # For example, you can use the update mapping API to update the `ignore_above` parameter. # **Change the mapping of an existing field** # Except for supported mapping parameters, you can't change the mapping or field type of an existing field. # Changing an existing field could invalidate data that's already indexed. 
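As a reviewer aside, the multi-field workflow described in the update mapping docs above looks roughly like this from the Ruby client; the index and field names are hypothetical:

```
require 'elasticsearch'

client = Elasticsearch::Client.new(url: 'http://localhost:9200')

# Add a keyword multi-field to an existing text field; existing documents are
# not reindexed, so the new sub-field can be backfilled with update by query.
client.indices.put_mapping(
  index: 'my-index-000001',
  body: {
    properties: {
      city: {
        type: 'text',
        fields: {
          raw: { type: 'keyword' }
        }
      }
    }
  }
)
```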
@@ -43,19 +43,19 @@ module Actions # Renaming a field would invalidate data already indexed under the old field name. # Instead, add an alias field to create an alternate field name. # - # @option arguments [String, Array] :index A comma-separated list of index names the mapping should be added to (supports wildcards); use +_all+ or omit to add the mapping on all indices. (*Required*) - # @option arguments [Boolean] :allow_no_indices If +false+, the request returns an error if any wildcard expression, index alias, or +_all+ value targets only missing or closed indices. + # @option arguments [String, Array] :index A comma-separated list of index names the mapping should be added to (supports wildcards); use `_all` or omit to add the mapping on all indices. (*Required*) + # @option arguments [Boolean] :allow_no_indices If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. # This behavior applies even if the request targets other open indices. Server default: true. # @option arguments [String, Array] :expand_wildcards Type of index that wildcard patterns can match. # If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. - # Supports comma-separated values, such as +open,hidden+. - # Valid values are: +all+, +open+, +closed+, +hidden+, +none+. Server default: open. - # @option arguments [Boolean] :ignore_unavailable If +false+, the request returns an error if it targets a missing or closed index. + # Supports comma-separated values, such as `open,hidden`. + # Valid values are: `all`, `open`, `closed`, `hidden`, `none`. Server default: open. + # @option arguments [Boolean] :ignore_unavailable If `false`, the request returns an error if it targets a missing or closed index. # @option arguments [Time] :master_timeout Period to wait for a connection to the master node. # If no response is received before the timeout expires, the request fails and returns an error. Server default: 30s. # @option arguments [Time] :timeout Period to wait for a response. # If no response is received before the timeout expires, the request fails and returns an error. Server default: 30s. - # @option arguments [Boolean] :write_index_only If +true+, the mappings are applied only to the current write index for the target. + # @option arguments [Boolean] :write_index_only If `true`, the mappings are applied only to the current write index for the target. # @option arguments [Hash] :headers Custom HTTP headers # @option arguments [Hash] :body request body # diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/indices/put_settings.rb b/elasticsearch-api/lib/elasticsearch/api/actions/indices/put_settings.rb index 9357527324..961018dcbe 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/indices/put_settings.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/indices/put_settings.rb @@ -27,7 +27,7 @@ module Actions # For data streams, index setting changes are applied to all backing indices by default. # To revert a setting to the default value, use a null value. # The list of per-index settings that can be updated dynamically on live indices can be found in index module documentation. - # To preserve existing settings from being updated, set the +preserve_existing+ parameter to +true+. + # To preserve existing settings from being updated, set the `preserve_existing` parameter to `true`. # NOTE: You can only define new analyzers on closed indices. 
# To add an analyzer, you must close the index, define the analyzer, and reopen the index.
# You cannot close the write index of a data stream.
@@ -38,25 +38,25 @@ module Actions
# To change the analyzer for existing backing indices, you must create a new data stream and reindex your data into it.
#
# @option arguments [String, Array] :index Comma-separated list of data streams, indices, and aliases used to limit
- # the request. Supports wildcards (+*+). To target all data streams and
- # indices, omit this parameter or use +*+ or +_all+.
- # @option arguments [Boolean] :allow_no_indices If +false+, the request returns an error if any wildcard expression, index
- # alias, or +_all+ value targets only missing or closed indices. This
+ # the request. Supports wildcards (`*`). To target all data streams and
+ # indices, omit this parameter or use `*` or `_all`.
+ # @option arguments [Boolean] :allow_no_indices If `false`, the request returns an error if any wildcard expression, index
+ # alias, or `_all` value targets only missing or closed indices. This
# behavior applies even if the request targets other open indices. For
- # example, a request targeting +foo*,bar*+ returns an error if an index
- # starts with +foo+ but no index starts with +bar+.
+ # example, a request targeting `foo*,bar*` returns an error if an index
+ # starts with `foo` but no index starts with `bar`.
# @option arguments [String, Array] :expand_wildcards Type of index that wildcard patterns can match. If the request can target
# data streams, this argument determines whether wildcard expressions match
# hidden data streams. Supports comma-separated values, such as
- # +open,hidden+. Server default: open.
- # @option arguments [Boolean] :flat_settings If +true+, returns settings in flat format.
- # @option arguments [Boolean] :ignore_unavailable If +true+, returns settings in flat format.
+ # `open,hidden`. Server default: open.
+ # @option arguments [Boolean] :flat_settings If `true`, returns settings in flat format.
+ # @option arguments [Boolean] :ignore_unavailable If `false`, the request returns an error if it targets a missing or closed index.
# @option arguments [Time] :master_timeout Period to wait for a connection to the master node. If no response is
# received before the timeout expires, the request fails and returns an
# error. Server default: 30s.
- # @option arguments [Boolean] :preserve_existing If +true+, existing index settings remain unchanged.
+ # @option arguments [Boolean] :preserve_existing If `true`, existing index settings remain unchanged.
# @option arguments [Boolean] :reopen Whether to close and reopen the index to apply non-dynamic settings.
- # If set to +true+ the indices to which the settings are being applied
+ # If set to `true` the indices to which the settings are being applied
# will be closed temporarily and then reopened in order to apply the changes.
# @option arguments [Time] :timeout Period to wait for a response. If no response is received before the
# timeout expires, the request fails and returns an error. Server default: 30s.
diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/indices/put_template.rb b/elasticsearch-api/lib/elasticsearch/api/actions/indices/put_template.rb
index c8069ff75b..8dc66d050d 100644
--- a/elasticsearch-api/lib/elasticsearch/api/actions/indices/put_template.rb
+++ b/elasticsearch-api/lib/elasticsearch/api/actions/indices/put_template.rb
@@ -31,7 +31,7 @@ module Actions
# Index templates are only applied during index creation.
# Changes to index templates do not affect existing indices. # Settings and mappings specified in create index API requests override any settings or mappings specified in an index template. - # You can use C-style +/* *\/+ block comments in index templates. + # You can use C-style `/* *\/` block comments in index templates. # You can include comments anywhere in the request body, except before the opening curly bracket. # **Indices matching multiple templates** # Multiple index templates can potentially match an index, in this case, both the settings and mappings are merged into the final configuration of the index. diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/indices/recovery.rb b/elasticsearch-api/lib/elasticsearch/api/actions/indices/recovery.rb index 8734419c83..1a1bce9660 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/indices/recovery.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/indices/recovery.rb @@ -41,10 +41,10 @@ module Actions # This means that if a shard copy completes a recovery and then Elasticsearch relocates it onto a different node then the information about the original recovery will not be shown in the recovery API. # # @option arguments [String, Array] :index Comma-separated list of data streams, indices, and aliases used to limit the request. - # Supports wildcards (+*+). - # To target all data streams and indices, omit this parameter or use +*+ or +_all+. - # @option arguments [Boolean] :active_only If +true+, the response only includes ongoing shard recoveries. - # @option arguments [Boolean] :detailed If +true+, the response includes detailed information about shard recoveries. + # Supports wildcards (`*`). + # To target all data streams and indices, omit this parameter or use `*` or `_all`. + # @option arguments [Boolean] :active_only If `true`, the response only includes ongoing shard recoveries. + # @option arguments [Boolean] :detailed If `true`, the response includes detailed information about shard recoveries. # @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-recovery diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/indices/refresh.rb b/elasticsearch-api/lib/elasticsearch/api/actions/indices/refresh.rb index f08348c1cb..404555b6fe 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/indices/refresh.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/indices/refresh.rb @@ -26,23 +26,23 @@ module Actions # A refresh makes recent operations performed on one or more indices available for search. # For data streams, the API runs the refresh operation on the stream’s backing indices. # By default, Elasticsearch periodically refreshes indices every second, but only on indices that have received one search request or more in the last 30 seconds. - # You can change this default interval with the +index.refresh_interval+ setting. + # You can change this default interval with the `index.refresh_interval` setting. # Refresh requests are synchronous and do not return a response until the refresh operation completes. # Refreshes are resource-intensive. # To ensure good cluster performance, it's recommended to wait for Elasticsearch's periodic refresh rather than performing an explicit refresh when possible. - # If your application workflow indexes documents and then runs a search to retrieve the indexed document, it's recommended to use the index API's +refresh=wait_for+ query parameter option. 
+ # If your application workflow indexes documents and then runs a search to retrieve the indexed document, it's recommended to use the index API's `refresh=wait_for` query parameter option. # This option ensures the indexing operation waits for a periodic refresh before running the search. # # @option arguments [String, Array] :index Comma-separated list of data streams, indices, and aliases used to limit the request. - # Supports wildcards (+*+). - # To target all data streams and indices, omit this parameter or use +*+ or +_all+. - # @option arguments [Boolean] :allow_no_indices If +false+, the request returns an error if any wildcard expression, index alias, or +_all+ value targets only missing or closed indices. + # Supports wildcards (`*`). + # To target all data streams and indices, omit this parameter or use `*` or `_all`. + # @option arguments [Boolean] :allow_no_indices If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. # This behavior applies even if the request targets other open indices. Server default: true. # @option arguments [String, Array] :expand_wildcards Type of index that wildcard patterns can match. # If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. - # Supports comma-separated values, such as +open,hidden+. - # Valid values are: +all+, +open+, +closed+, +hidden+, +none+. Server default: open. - # @option arguments [Boolean] :ignore_unavailable If +false+, the request returns an error if it targets a missing or closed index. + # Supports comma-separated values, such as `open,hidden`. + # Valid values are: `all`, `open`, `closed`, `hidden`, `none`. Server default: open. + # @option arguments [Boolean] :ignore_unavailable If `false`, the request returns an error if it targets a missing or closed index. # @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-refresh diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/indices/reload_search_analyzers.rb b/elasticsearch-api/lib/elasticsearch/api/actions/indices/reload_search_analyzers.rb index 8f57bf9584..258fb02a8f 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/indices/reload_search_analyzers.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/indices/reload_search_analyzers.rb @@ -26,8 +26,8 @@ module Actions # Reload an index's search analyzers and their resources. # For data streams, the API reloads search analyzers and resources for the stream's backing indices. # IMPORTANT: After reloading the search analyzers you should clear the request cache to make sure it doesn't contain responses derived from the previous versions of the analyzer. - # You can use the reload search analyzers API to pick up changes to synonym files used in the +synonym_graph+ or +synonym+ token filter of a search analyzer. - # To be eligible, the token filter must have an +updateable+ flag of +true+ and only be used in search analyzers. + # You can use the reload search analyzers API to pick up changes to synonym files used in the `synonym_graph` or `synonym` token filter of a search analyzer. + # To be eligible, the token filter must have an `updateable` flag of `true` and only be used in search analyzers. # NOTE: This API does not perform a reload for each shard of an index. # Instead, it performs a reload for each node containing index shards. 
# As a result, the total shard count returned by the API can differ from the number of index shards. @@ -35,7 +35,7 @@ module Actions # This ensures the synonym file is updated everywhere in the cluster in case shards are relocated in the future. # # @option arguments [String, Array] :index A comma-separated list of index names to reload analyzers for (*Required*) - # @option arguments [Boolean] :allow_no_indices Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes +_all+ string or when no indices have been specified) + # @option arguments [Boolean] :allow_no_indices Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) # @option arguments [String, Array] :expand_wildcards Whether to expand wildcard expression to concrete indices that are open, closed or both. # @option arguments [Boolean] :ignore_unavailable Whether specified concrete indices should be ignored when unavailable (missing or closed) # @option arguments [String] :resource Changed resource to reload analyzers from if applicable diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/indices/resolve_cluster.rb b/elasticsearch-api/lib/elasticsearch/api/actions/indices/resolve_cluster.rb index 50dcf75310..6b1d99d1ab 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/indices/resolve_cluster.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/indices/resolve_cluster.rb @@ -29,49 +29,49 @@ module Actions # You use the same index expression with this endpoint as you would for cross-cluster search. # Index and cluster exclusions are also supported with this endpoint. # For each cluster in the index expression, information is returned about: - # * Whether the querying ("local") cluster is currently connected to each remote cluster specified in the index expression. Note that this endpoint actively attempts to contact the remote clusters, unlike the +remote/info+ endpoint. - # * Whether each remote cluster is configured with +skip_unavailable+ as +true+ or +false+. + # * Whether the querying ("local") cluster is currently connected to each remote cluster specified in the index expression. Note that this endpoint actively attempts to contact the remote clusters, unlike the `remote/info` endpoint. + # * Whether each remote cluster is configured with `skip_unavailable` as `true` or `false`. # * Whether there are any indices, aliases, or data streams on that cluster that match the index expression. # * Whether the search is likely to have errors returned when you do the cross-cluster search (including any authorization errors if you do not have permission to query the index). # * Cluster version information, including the Elasticsearch server version. - # For example, +GET /_resolve/cluster/my-index-*,cluster*:my-index-*+ returns information about the local cluster and all remotely configured clusters that start with the alias +cluster*+. - # Each cluster returns information about whether it has any indices, aliases or data streams that match +my-index-*+.The ability to query without an index expression was added in version 8.18, so when + # For example, `GET /_resolve/cluster/my-index-*,cluster*:my-index-*` returns information about the local cluster and all remotely configured clusters that start with the alias `cluster*`. 
+ # Each cluster returns information about whether it has any indices, aliases or data streams that match `my-index-*`. The ability to query without an index expression was added in version 8.18, so when
# querying remote clusters older than that, the local cluster will send the index
- # expression +dummy*+ to those remote clusters. Thus, if an errors occur, you may see a reference
+ # expression `dummy*` to those remote clusters. Thus, if an error occurs, you may see a reference
# to that index expression even though you didn't request it. If it causes a problem, you can
- # instead include an index expression like +*:*+ to bypass the issue.You may want to exclude a cluster or index from a search when:
- # * A remote cluster is not currently connected and is configured with +skip_unavailable=false+. Running a cross-cluster search under those conditions will cause the entire search to fail.
- # * A cluster has no matching indices, aliases or data streams for the index expression (or your user does not have permissions to search them). For example, suppose your index expression is +logs*,remote1:logs*+ and the remote1 cluster has no indices, aliases or data streams that match +logs*+. In that case, that cluster will return no results from that cluster if you include it in a cross-cluster search.
- # * The index expression (combined with any query parameters you specify) will likely cause an exception to be thrown when you do the search. In these cases, the "error" field in the +_resolve/cluster+ response will be present. (This is also where security/permission errors will be shown.)
- # * A remote cluster is an older version that does not support the feature you want to use in your search.The +remote/info+ endpoint is commonly used to test whether the "local" cluster (the cluster being queried) is connected to its remote clusters, but it does not necessarily reflect whether the remote cluster is available or not.
+ # instead include an index expression like `*:*` to bypass the issue. You may want to exclude a cluster or index from a search when:
+ # * A remote cluster is not currently connected and is configured with `skip_unavailable=false`. Running a cross-cluster search under those conditions will cause the entire search to fail.
+ # * A cluster has no matching indices, aliases or data streams for the index expression (or your user does not have permissions to search them). For example, suppose your index expression is `logs*,remote1:logs*` and the remote1 cluster has no indices, aliases or data streams that match `logs*`. In that case, that cluster will return no results if you include it in a cross-cluster search.
+ # * The index expression (combined with any query parameters you specify) will likely cause an exception to be thrown when you do the search. In these cases, the "error" field in the `_resolve/cluster` response will be present. (This is also where security/permission errors will be shown.)
+ # * A remote cluster is an older version that does not support the feature you want to use in your search. The `remote/info` endpoint is commonly used to test whether the "local" cluster (the cluster being queried) is connected to its remote clusters, but it does not necessarily reflect whether the remote cluster is available or not.
# The remote cluster may be available, while the local cluster is not currently connected to it.
- # You can use the +_resolve/cluster+ API to attempt to reconnect to remote clusters.
- # For example with +GET _resolve/cluster+ or +GET _resolve/cluster/*:*+. - # The +connected+ field in the response will indicate whether it was successful. - # If a connection was (re-)established, this will also cause the +remote/info+ endpoint to now indicate a connected status. + # You can use the `_resolve/cluster` API to attempt to reconnect to remote clusters. + # For example with `GET _resolve/cluster` or `GET _resolve/cluster/*:*`. + # The `connected` field in the response will indicate whether it was successful. + # If a connection was (re-)established, this will also cause the `remote/info` endpoint to now indicate a connected status. # # @option arguments [String, Array] :name A comma-separated list of names or index patterns for the indices, aliases, and data streams to resolve. - # Resources on remote clusters can be specified using the ++:++ syntax. - # Index and cluster exclusions (e.g., +-cluster1:*+) are also supported. + # Resources on remote clusters can be specified using the ``:`` syntax. + # Index and cluster exclusions (e.g., `-cluster1:*`) are also supported. # If no index expression is specified, information about all remote clusters configured on the local cluster # is returned without doing any index matching - # @option arguments [Boolean] :allow_no_indices If false, the request returns an error if any wildcard expression, index alias, or +_all+ value targets only missing + # @option arguments [Boolean] :allow_no_indices If false, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing # or closed indices. This behavior applies even if the request targets other open indices. For example, a request - # targeting +foo*,bar*+ returns an error if an index starts with +foo+ but no index starts with +bar+. + # targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. # NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index - # options to the +_resolve/cluster+ API endpoint that takes no index expression. Server default: true. + # options to the `_resolve/cluster` API endpoint that takes no index expression. Server default: true. # @option arguments [String, Array] :expand_wildcards Type of index that wildcard patterns can match. # If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. - # Supports comma-separated values, such as +open,hidden+. - # Valid values are: +all+, +open+, +closed+, +hidden+, +none+. + # Supports comma-separated values, such as `open,hidden`. + # Valid values are: `all`, `open`, `closed`, `hidden`, `none`. # NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index - # options to the +_resolve/cluster+ API endpoint that takes no index expression. Server default: open. + # options to the `_resolve/cluster` API endpoint that takes no index expression. Server default: open. # @option arguments [Boolean] :ignore_throttled If true, concrete, expanded, or aliased indices are ignored when frozen. # NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index - # options to the +_resolve/cluster+ API endpoint that takes no index expression. + # options to the `_resolve/cluster` API endpoint that takes no index expression. 
# @option arguments [Boolean] :ignore_unavailable If false, the request returns an error if it targets a missing or closed index. # NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index - # options to the +_resolve/cluster+ API endpoint that takes no index expression. + # options to the `_resolve/cluster` API endpoint that takes no index expression. # @option arguments [Time] :timeout The maximum time to wait for remote clusters to respond. # If a remote cluster does not respond within this timeout period, the API response # will show the cluster as not connected and include an error message that the diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/indices/resolve_index.rb b/elasticsearch-api/lib/elasticsearch/api/actions/indices/resolve_index.rb index 2e0331a3f3..872cc99c72 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/indices/resolve_index.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/indices/resolve_index.rb @@ -27,15 +27,15 @@ module Actions # Multiple patterns and remote clusters are supported. # # @option arguments [String, Array] :name Comma-separated name(s) or index pattern(s) of the indices, aliases, and data streams to resolve. - # Resources on remote clusters can be specified using the ++:++ syntax. (*Required*) + # Resources on remote clusters can be specified using the ``:`` syntax. (*Required*) # @option arguments [String, Array] :expand_wildcards Type of index that wildcard patterns can match. # If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. - # Supports comma-separated values, such as +open,hidden+. - # Valid values are: +all+, +open+, +closed+, +hidden+, +none+. Server default: open. - # @option arguments [Boolean] :ignore_unavailable If +false+, the request returns an error if it targets a missing or closed index. - # @option arguments [Boolean] :allow_no_indices If +false+, the request returns an error if any wildcard expression, index alias, or +_all+ value targets only missing or closed indices. + # Supports comma-separated values, such as `open,hidden`. + # Valid values are: `all`, `open`, `closed`, `hidden`, `none`. Server default: open. + # @option arguments [Boolean] :ignore_unavailable If `false`, the request returns an error if it targets a missing or closed index. + # @option arguments [Boolean] :allow_no_indices If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. # This behavior applies even if the request targets other open indices. - # For example, a request targeting +foo*,bar*+ returns an error if an index starts with +foo+ but no index starts with +bar+. Server default: true. + # For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. Server default: true. 
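For context, a small sketch of resolving an index pattern with the Ruby client; the pattern and client setup are placeholders rather than part of this change:

```
require 'elasticsearch'

client = Elasticsearch::Client.new(url: 'http://localhost:9200')

# List the concrete indices, aliases, and data streams matching a wildcard,
# including hidden targets.
response = client.indices.resolve_index(
  name: 'my-index-*',
  expand_wildcards: 'open,hidden'
)
puts response['indices']
```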
# @option arguments [Hash] :headers Custom HTTP headers
#
# @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-resolve-index
diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/indices/rollover.rb b/elasticsearch-api/lib/elasticsearch/api/actions/indices/rollover.rb
index 720fbd318a..5a2c4db2be 100644
--- a/elasticsearch-api/lib/elasticsearch/api/actions/indices/rollover.rb
+++ b/elasticsearch-api/lib/elasticsearch/api/actions/indices/rollover.rb
@@ -34,32 +34,32 @@ module Actions
# TIP: Prior to Elasticsearch 7.9, you'd typically use an index alias with a write index to manage time series data.
# Data streams replace this functionality, require less maintenance, and automatically integrate with data tiers.
# If an index alias points to multiple indices, one of the indices must be a write index.
- # The rollover API creates a new write index for the alias with +is_write_index+ set to +true+.
- # The API also +sets is_write_index+ to +false+ for the previous write index.
+ # The rollover API creates a new write index for the alias with `is_write_index` set to `true`.
+ # The API also sets `is_write_index` to `false` for the previous write index.
# **Roll over an index alias with one index**
# If you roll over an index alias that points to only one index, the API creates a new index for the alias and removes the original index from the alias.
- # NOTE: A rollover creates a new index and is subject to the +wait_for_active_shards+ setting.
+ # NOTE: A rollover creates a new index and is subject to the `wait_for_active_shards` setting.
# **Increment index names for an alias**
# When you roll over an index alias, you can specify a name for the new index.
- # If you don't specify a name and the current index ends with +-+ and a number, such as +my-index-000001+ or +my-index-3+, the new index name increments that number.
- # For example, if you roll over an alias with a current index of +my-index-000001+, the rollover creates a new index named +my-index-000002+.
+ # If you don't specify a name and the current index ends with `-` and a number, such as `my-index-000001` or `my-index-3`, the new index name increments that number.
+ # For example, if you roll over an alias with a current index of `my-index-000001`, the rollover creates a new index named `my-index-000002`.
# This number is always six characters and zero-padded, regardless of the previous index's name.
# If you use an index alias for time series data, you can use date math in the index name to track the rollover date.
- # For example, you can create an alias that points to an index named ++.
- # If you create the index on May 6, 2099, the index's name is +my-index-2099.05.06-000001+.
- # If you roll over the alias on May 7, 2099, the new index's name is +my-index-2099.05.07-000002+.
+ # For example, you can create an alias that points to an index named `<my-index-{now/d}-000001>`.
+ # If you create the index on May 6, 2099, the index's name is `my-index-2099.05.06-000001`.
+ # If you roll over the alias on May 7, 2099, the new index's name is `my-index-2099.05.07-000002`.
#
# @option arguments [String] :alias Name of the data stream or index alias to roll over. (*Required*)
# @option arguments [String] :new_index Name of the index to create.
# Supports date math.
# Data streams do not support this parameter.
- # @option arguments [Boolean] :dry_run If +true+, checks whether the current index satisfies the specified conditions but does not perform a rollover.
+ # @option arguments [Boolean] :dry_run If `true`, checks whether the current index satisfies the specified conditions but does not perform a rollover. # @option arguments [Time] :master_timeout Period to wait for a connection to the master node. # If no response is received before the timeout expires, the request fails and returns an error. Server default: 30s. # @option arguments [Time] :timeout Period to wait for a response. # If no response is received before the timeout expires, the request fails and returns an error. Server default: 30s. # @option arguments [Integer, String] :wait_for_active_shards The number of shard copies that must be active before proceeding with the operation. - # Set to all or any positive integer up to the total number of shards in the index (+number_of_replicas+1+). Server default: 1. + # Set to all or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). Server default: 1. # @option arguments [Boolean] :lazy If set to true, the rollover action will only mark a data stream to signal that it needs to be rolled over at the next write. # Only allowed on data streams. # @option arguments [Hash] :headers Custom HTTP headers diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/indices/segments.rb b/elasticsearch-api/lib/elasticsearch/api/actions/indices/segments.rb index 33719cd46f..27ce36ba3c 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/indices/segments.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/indices/segments.rb @@ -27,15 +27,15 @@ module Actions # For data streams, the API returns information about the stream's backing indices. # # @option arguments [String, Array] :index Comma-separated list of data streams, indices, and aliases used to limit the request. - # Supports wildcards (+*+). - # To target all data streams and indices, omit this parameter or use +*+ or +_all+. - # @option arguments [Boolean] :allow_no_indices If +false+, the request returns an error if any wildcard expression, index alias, or +_all+ value targets only missing or closed indices. + # Supports wildcards (`*`). + # To target all data streams and indices, omit this parameter or use `*` or `_all`. + # @option arguments [Boolean] :allow_no_indices If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. # This behavior applies even if the request targets other open indices. Server default: true. # @option arguments [String, Array] :expand_wildcards Type of index that wildcard patterns can match. # If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. - # Supports comma-separated values, such as +open,hidden+. - # Valid values are: +all+, +open+, +closed+, +hidden+, +none+. Server default: open. - # @option arguments [Boolean] :ignore_unavailable If +false+, the request returns an error if it targets a missing or closed index. + # Supports comma-separated values, such as `open,hidden`. + # Valid values are: `all`, `open`, `closed`, `hidden`, `none`. Server default: open. + # @option arguments [Boolean] :ignore_unavailable If `false`, the request returns an error if it targets a missing or closed index. 
# @option arguments [Hash] :headers Custom HTTP headers
#
# @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-segments
diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/indices/shrink.rb b/elasticsearch-api/lib/elasticsearch/api/actions/indices/shrink.rb
index 2df762cdf1..214a86471b 100644
--- a/elasticsearch-api/lib/elasticsearch/api/actions/indices/shrink.rb
+++ b/elasticsearch-api/lib/elasticsearch/api/actions/indices/shrink.rb
@@ -38,7 +38,7 @@ module Actions
# A shrink operation:
# * Creates a new target index with the same definition as the source index, but with a smaller number of primary shards.
# * Hard-links segments from the source index into the target index. If the file system does not support hard-linking, then all segments are copied into the new index, which is a much more time consuming process. Also if using multiple data paths, shards on different data paths require a full copy of segment files if they are not on the same disk since hardlinks do not work across disks.
- # * Recovers the target index as though it were a closed index which had just been re-opened. Recovers shards to the +.routing.allocation.initial_recovery._id+ index setting.
+ # * Recovers the target index as though it were a closed index which had just been re-opened. Recovers shards to the `.routing.allocation.initial_recovery._id` index setting.
# IMPORTANT: Indices can only be shrunk if they satisfy the following requirements:
# * The target index must not exist.
# * The source index must have more primary shards than the target index.
@@ -53,7 +53,7 @@ module Actions
# @option arguments [Time] :timeout Period to wait for a response.
# If no response is received before the timeout expires, the request fails and returns an error. Server default: 30s.
# @option arguments [Integer, String] :wait_for_active_shards The number of shard copies that must be active before proceeding with the operation.
- # Set to +all+ or any positive integer up to the total number of shards in the index (+number_of_replicas+1+). Server default: 1.
+ # Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). Server default: 1.
# @option arguments [Hash] :headers Custom HTTP headers
# @option arguments [Hash] :body request body
#
diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/indices/split.rb b/elasticsearch-api/lib/elasticsearch/api/actions/indices/split.rb
index 121ea50c0b..4301c0a006 100644
--- a/elasticsearch-api/lib/elasticsearch/api/actions/indices/split.rb
+++ b/elasticsearch-api/lib/elasticsearch/api/actions/indices/split.rb
@@ -28,14 +28,16 @@ module Actions
# * The index must be read-only.
# * The cluster health status must be green.
# You can make an index read-only with the following request using the add index block API:
- # +
+ #
+ # ```
# PUT /my_source_index/_block/write
- # +
+ # ```
+ #
# The current write index on a data stream cannot be split.
# In order to split the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be split.
- # The number of times the index can be split (and the number of shards that each original shard can be split into) is determined by the +index.number_of_routing_shards+ setting.
# The number of routing shards specifies the hashing space that is used internally to distribute documents across shards with consistent hashing. - # For instance, a 5 shard index with +number_of_routing_shards+ set to 30 (5 x 2 x 3) could be split by a factor of 2 or 3. + # For instance, a 5 shard index with `number_of_routing_shards` set to 30 (5 x 2 x 3) could be split by a factor of 2 or 3. # A split operation: # * Creates a new target index with the same definition as the source index, but with a larger number of primary shards. # * Hard-links segments from the source index into the target index. If the file system doesn't support hard-linking, all segments are copied into the new index, which is a much more time consuming process. @@ -54,7 +56,7 @@ module Actions # @option arguments [Time] :timeout Period to wait for a response. # If no response is received before the timeout expires, the request fails and returns an error. Server default: 30s. # @option arguments [Integer, String] :wait_for_active_shards The number of shard copies that must be active before proceeding with the operation. - # Set to +all+ or any positive integer up to the total number of shards in the index (+number_of_replicas+1+). Server default: 1. + # Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). Server default: 1. # @option arguments [Hash] :headers Custom HTTP headers # @option arguments [Hash] :body request body # diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/indices/stats.rb b/elasticsearch-api/lib/elasticsearch/api/actions/indices/stats.rb index 056b022fda..da799dd07f 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/indices/stats.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/indices/stats.rb @@ -24,19 +24,19 @@ module Indices module Actions # Get index statistics. # For data streams, the API retrieves statistics for the stream's backing indices. - # By default, the returned statistics are index-level with +primaries+ and +total+ aggregations. - # +primaries+ are the values for only the primary shards. - # +total+ are the accumulated values for both primary and replica shards. - # To get shard-level statistics, set the +level+ parameter to +shards+. + # By default, the returned statistics are index-level with `primaries` and `total` aggregations. + # `primaries` are the values for only the primary shards. + # `total` are the accumulated values for both primary and replica shards. + # To get shard-level statistics, set the `level` parameter to `shards`. # NOTE: When moving to another node, the shard-level statistics for a shard are cleared. # Although the shard is no longer part of the node, that node retains any node-level statistics to which the shard contributed. # # @option arguments [String, Array] :metric Limit the information returned the specific metrics. - # @option arguments [String, Array] :index A comma-separated list of index names; use +_all+ or empty string to perform the operation on all indices + # @option arguments [String, Array] :index A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices # @option arguments [String, Array] :completion_fields Comma-separated list or wildcard expressions of fields to include in fielddata and suggest statistics. # @option arguments [String, Array] :expand_wildcards Type of index that wildcard patterns can match. 
If the request can target data streams, this argument # determines whether wildcard expressions match hidden data streams. Supports comma-separated values, - # such as +open,hidden+. + # such as `open,hidden`. # @option arguments [String, Array] :fielddata_fields Comma-separated list or wildcard expressions of fields to include in fielddata statistics. # @option arguments [String, Array] :fields Comma-separated list or wildcard expressions of fields to include in the statistics. # @option arguments [Boolean] :forbid_closed_indices If true, statistics are not collected from closed indices. Server default: true. diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/indices/validate_query.rb b/elasticsearch-api/lib/elasticsearch/api/actions/indices/validate_query.rb index 7e7fd81ac6..e5e0256540 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/indices/validate_query.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/indices/validate_query.rb @@ -26,25 +26,25 @@ module Actions # Validates a query without running it. # # @option arguments [String, Array] :index Comma-separated list of data streams, indices, and aliases to search. - # Supports wildcards (+*+). - # To search all data streams or indices, omit this parameter or use +*+ or +_all+. - # @option arguments [Boolean] :allow_no_indices If +false+, the request returns an error if any wildcard expression, index alias, or +_all+ value targets only missing or closed indices. + # Supports wildcards (`*`). + # To search all data streams or indices, omit this parameter or use `*` or `_all`. + # @option arguments [Boolean] :allow_no_indices If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. # This behavior applies even if the request targets other open indices. Server default: true. - # @option arguments [Boolean] :all_shards If +true+, the validation is executed on all shards instead of one random shard per index. + # @option arguments [Boolean] :all_shards If `true`, the validation is executed on all shards instead of one random shard per index. # @option arguments [String] :analyzer Analyzer to use for the query string. - # This parameter can only be used when the +q+ query string parameter is specified. - # @option arguments [Boolean] :analyze_wildcard If +true+, wildcard and prefix queries are analyzed. - # @option arguments [String] :default_operator The default operator for query string query: +AND+ or +OR+. Server default: OR. + # This parameter can only be used when the `q` query string parameter is specified. + # @option arguments [Boolean] :analyze_wildcard If `true`, wildcard and prefix queries are analyzed. + # @option arguments [String] :default_operator The default operator for query string query: `AND` or `OR`. Server default: OR. # @option arguments [String] :df Field to use as default where no field prefix is given in the query string. - # This parameter can only be used when the +q+ query string parameter is specified. + # This parameter can only be used when the `q` query string parameter is specified. # @option arguments [String, Array] :expand_wildcards Type of index that wildcard patterns can match. # If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. - # Supports comma-separated values, such as +open,hidden+. - # Valid values are: +all+, +open+, +closed+, +hidden+, +none+. Server default: open. 
- # @option arguments [Boolean] :explain If +true+, the response returns detailed information if an error has occurred. - # @option arguments [Boolean] :ignore_unavailable If +false+, the request returns an error if it targets a missing or closed index. - # @option arguments [Boolean] :lenient If +true+, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. - # @option arguments [Boolean] :rewrite If +true+, returns a more detailed explanation showing the actual Lucene query that will be executed. + # Supports comma-separated values, such as `open,hidden`. + # Valid values are: `all`, `open`, `closed`, `hidden`, `none`. Server default: open. + # @option arguments [Boolean] :explain If `true`, the response returns detailed information if an error has occurred. + # @option arguments [Boolean] :ignore_unavailable If `false`, the request returns an error if it targets a missing or closed index. + # @option arguments [Boolean] :lenient If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. + # @option arguments [Boolean] :rewrite If `true`, returns a more detailed explanation showing the actual Lucene query that will be executed. # @option arguments [String] :q Query in the Lucene query string syntax. # @option arguments [Hash] :headers Custom HTTP headers # @option arguments [Hash] :body request body diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/inference/chat_completion_unified.rb b/elasticsearch-api/lib/elasticsearch/api/actions/inference/chat_completion_unified.rb index a300350393..9e086e5df2 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/inference/chat_completion_unified.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/inference/chat_completion_unified.rb @@ -24,13 +24,13 @@ module Inference module Actions # Perform chat completion inference # The chat completion inference API enables real-time responses for chat completion tasks by delivering answers incrementally, reducing response times during computation. - # It only works with the +chat_completion+ task type for +openai+ and +elastic+ inference services. + # It only works with the `chat_completion` task type for `openai` and `elastic` inference services. # IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. # For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. - # NOTE: The +chat_completion+ task type is only available within the _stream API and only supports streaming. + # NOTE: The `chat_completion` task type is only available within the _stream API and only supports streaming. # The Chat completion inference API and the Stream inference API differ in their response structure and capabilities. # The Chat completion inference API provides more comprehensive customization options through more fields and function calling support. - # If you use the +openai+ service or the +elastic+ service, use the Chat completion inference API. + # If you use the `openai` service or the `elastic` service, use the Chat completion inference API. 
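For orientation, a minimal Ruby-client sketch of the call this endpoint describes (the endpoint id and the OpenAI-style messages payload are illustrative assumptions, not values from this change; a client that supports streaming is required):

    client.inference.chat_completion_unified(
      inference_id: 'my-chat-endpoint', # hypothetical chat_completion endpoint id
      body: {
        # assumed OpenAI-style chat payload; the answer is streamed back incrementally
        messages: [
          { role: 'user', content: 'Hello' }
        ]
      }
    )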
# # @option arguments [String] :inference_id The inference Id (*Required*) # @option arguments [Time] :timeout Specifies the amount of time to wait for the inference request to complete. Server default: 30s. diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_alibabacloud.rb b/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_alibabacloud.rb index 545a7fea3e..388a465761 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_alibabacloud.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_alibabacloud.rb @@ -23,7 +23,7 @@ module API module Inference module Actions # Create an AlibabaCloud AI Search inference endpoint. - # Create an inference endpoint to perform an inference task with the +alibabacloud-ai-search+ service. + # Create an inference endpoint to perform an inference task with the `alibabacloud-ai-search` service. # # @option arguments [String] :task_type The type of the inference task that the model will perform. (*Required*) # @option arguments [String] :alibabacloud_inference_id The unique identifier of the inference endpoint. (*Required*) diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_amazonbedrock.rb b/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_amazonbedrock.rb index fe507d2147..ec39a30d3c 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_amazonbedrock.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_amazonbedrock.rb @@ -23,7 +23,7 @@ module API module Inference module Actions # Create an Amazon Bedrock inference endpoint. - # Creates an inference endpoint to perform an inference task with the +amazonbedrock+ service. + # Creates an inference endpoint to perform an inference task with the `amazonbedrock` service. # # @option arguments [String] :task_type The type of the inference task that the model will perform. (*Required*) # @option arguments [String] :amazonbedrock_inference_id The unique identifier of the inference endpoint. (*Required*) diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_anthropic.rb b/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_anthropic.rb index 9ecc1a2259..059bf58939 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_anthropic.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_anthropic.rb @@ -23,10 +23,10 @@ module API module Inference module Actions # Create an Anthropic inference endpoint. - # Create an inference endpoint to perform an inference task with the +anthropic+ service. + # Create an inference endpoint to perform an inference task with the `anthropic` service. # # @option arguments [String] :task_type The task type. - # The only valid task type for the model to perform is +completion+. (*Required*) + # The only valid task type for the model to perform is `completion`. (*Required*) # @option arguments [String] :anthropic_inference_id The unique identifier of the inference endpoint. 
(*Required*) # @option arguments [Hash] :headers Custom HTTP headers # @option arguments [Hash] :body request body diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_azureaistudio.rb b/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_azureaistudio.rb index bde6b1e92c..9acc4d90e6 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_azureaistudio.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_azureaistudio.rb @@ -23,7 +23,7 @@ module API module Inference module Actions # Create an Azure AI studio inference endpoint. - # Create an inference endpoint to perform an inference task with the +azureaistudio+ service. + # Create an inference endpoint to perform an inference task with the `azureaistudio` service. # # @option arguments [String] :task_type The type of the inference task that the model will perform. (*Required*) # @option arguments [String] :azureaistudio_inference_id The unique identifier of the inference endpoint. (*Required*) diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_azureopenai.rb b/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_azureopenai.rb index fb6bd70ab0..1842390a09 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_azureopenai.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_azureopenai.rb @@ -23,14 +23,14 @@ module API module Inference module Actions # Create an Azure OpenAI inference endpoint. - # Create an inference endpoint to perform an inference task with the +azureopenai+ service. + # Create an inference endpoint to perform an inference task with the `azureopenai` service. # The list of chat completion models that you can choose from in your Azure OpenAI deployment include: # * {https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-4-and-gpt-4-turbo-models GPT-4 and GPT-4 Turbo models} # * {https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-35 GPT-3.5} # The list of embeddings models that you can choose from in your deployment can be found in the {https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#embeddings Azure models documentation}. # # @option arguments [String] :task_type The type of the inference task that the model will perform. - # NOTE: The +chat_completion+ task type only supports streaming and only through the _stream API. (*Required*) + # NOTE: The `chat_completion` task type only supports streaming and only through the _stream API. (*Required*) # @option arguments [String] :azureopenai_inference_id The unique identifier of the inference endpoint. (*Required*) # @option arguments [Hash] :headers Custom HTTP headers # @option arguments [Hash] :body request body diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_cohere.rb b/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_cohere.rb index aa838b5c1a..24bb09b8e7 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_cohere.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_cohere.rb @@ -23,7 +23,7 @@ module API module Inference module Actions # Create a Cohere inference endpoint. - # Create an inference endpoint to perform an inference task with the +cohere+ service. 
+ # Create an inference endpoint to perform an inference task with the `cohere` service. # # @option arguments [String] :task_type The type of the inference task that the model will perform. (*Required*) # @option arguments [String] :cohere_inference_id The unique identifier of the inference endpoint. (*Required*) diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_elasticsearch.rb b/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_elasticsearch.rb index 7a966ef3a2..812b8a8158 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_elasticsearch.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_elasticsearch.rb @@ -23,11 +23,11 @@ module API module Inference module Actions # Create an Elasticsearch inference endpoint. - # Create an inference endpoint to perform an inference task with the +elasticsearch+ service. + # Create an inference endpoint to perform an inference task with the `elasticsearch` service. # # @option arguments [String] :task_type The type of the inference task that the model will perform. (*Required*) # @option arguments [String] :elasticsearch_inference_id The unique identifier of the inference endpoint. - # The must not match the +model_id+. (*Required*) + # It must not match the `model_id`. (*Required*) # @option arguments [Hash] :headers Custom HTTP headers # @option arguments [Hash] :body request body # diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_elser.rb b/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_elser.rb index 16e6e80bb3..060f498a1a 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_elser.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_elser.rb @@ -23,7 +23,7 @@ module API module Inference module Actions # Create an ELSER inference endpoint. - # Create an inference endpoint to perform an inference task with the +elser+ service. + # Create an inference endpoint to perform an inference task with the `elser` service. # You can also deploy ELSER by using the Elasticsearch inference integration. # # @option arguments [String] :task_type The type of the inference task that the model will perform. (*Required*) diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_googleaistudio.rb b/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_googleaistudio.rb index a0514a4e9d..73279f144f 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_googleaistudio.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_googleaistudio.rb @@ -23,7 +23,7 @@ module API module Inference module Actions # Create an Google AI Studio inference endpoint. - # Create an inference endpoint to perform an inference task with the +googleaistudio+ service. + # Create an inference endpoint to perform an inference task with the `googleaistudio` service. # # @option arguments [String] :task_type The type of the inference task that the model will perform. (*Required*) # @option arguments [String] :googleaistudio_inference_id The unique identifier of the inference endpoint. 
(*Required*) diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_googlevertexai.rb b/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_googlevertexai.rb index 7cc7d809c7..961046c7b0 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_googlevertexai.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_googlevertexai.rb @@ -23,7 +23,7 @@ module API module Inference module Actions # Create a Google Vertex AI inference endpoint. - # Create an inference endpoint to perform an inference task with the +googlevertexai+ service. + # Create an inference endpoint to perform an inference task with the `googlevertexai` service. # # @option arguments [String] :task_type The type of the inference task that the model will perform. (*Required*) # @option arguments [String] :googlevertexai_inference_id The unique identifier of the inference endpoint. (*Required*) diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_hugging_face.rb b/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_hugging_face.rb index fab5f7b154..15d8d5f285 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_hugging_face.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_hugging_face.rb @@ -23,18 +23,18 @@ module API module Inference module Actions # Create a Hugging Face inference endpoint. - # Create an inference endpoint to perform an inference task with the +hugging_face+ service. + # Create an inference endpoint to perform an inference task with the `hugging_face` service. # You must first create an inference endpoint on the Hugging Face endpoint page to get an endpoint URL. - # Select the model you want to use on the new endpoint creation page (for example +intfloat/e5-small-v2+), then select the sentence embeddings task under the advanced configuration section. + # Select the model you want to use on the new endpoint creation page (for example `intfloat/e5-small-v2`), then select the sentence embeddings task under the advanced configuration section. # Create the endpoint and copy the URL after the endpoint initialization has been finished. # The following models are recommended for the Hugging Face service: - # * +all-MiniLM-L6-v2+ - # * +all-MiniLM-L12-v2+ - # * +all-mpnet-base-v2+ - # * +e5-base-v2+ - # * +e5-small-v2+ - # * +multilingual-e5-base+ - # * +multilingual-e5-small+ + # * `all-MiniLM-L6-v2` + # * `all-MiniLM-L12-v2` + # * `all-mpnet-base-v2` + # * `e5-base-v2` + # * `e5-small-v2` + # * `multilingual-e5-base` + # * `multilingual-e5-small` # # @option arguments [String] :task_type The type of the inference task that the model will perform. (*Required*) # @option arguments [String] :huggingface_inference_id The unique identifier of the inference endpoint. (*Required*) diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_jinaai.rb b/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_jinaai.rb index ace5a1d695..e14879dd04 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_jinaai.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_jinaai.rb @@ -23,9 +23,9 @@ module API module Inference module Actions # Create an JinaAI inference endpoint. - # Create an inference endpoint to perform an inference task with the +jinaai+ service. - # To review the available +rerank+ models, refer to . - # To review the available +text_embedding+ models, refer to the . 
+ # Create an inference endpoint to perform an inference task with the `jinaai` service. + # To review the available `rerank` models, refer to . + # To review the available `text_embedding` models, refer to the . # # @option arguments [String] :task_type The type of the inference task that the model will perform. (*Required*) # @option arguments [String] :jinaai_inference_id The unique identifier of the inference endpoint. (*Required*) diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_mistral.rb b/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_mistral.rb index 3fd32dc48a..b8746eae07 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_mistral.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_mistral.rb @@ -23,10 +23,10 @@ module API module Inference module Actions # Create a Mistral inference endpoint. - # Creates an inference endpoint to perform an inference task with the +mistral+ service. + # Creates an inference endpoint to perform an inference task with the `mistral` service. # # @option arguments [String] :task_type The task type. - # The only valid task type for the model to perform is +text_embedding+. (*Required*) + # The only valid task type for the model to perform is `text_embedding`. (*Required*) # @option arguments [String] :mistral_inference_id The unique identifier of the inference endpoint. (*Required*) # @option arguments [Hash] :headers Custom HTTP headers # @option arguments [Hash] :body request body diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_openai.rb b/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_openai.rb index e7bfb746f9..81efd183a0 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_openai.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_openai.rb @@ -23,10 +23,10 @@ module API module Inference module Actions # Create an OpenAI inference endpoint. - # Create an inference endpoint to perform an inference task with the +openai+ service or +openai+ compatible APIs. + # Create an inference endpoint to perform an inference task with the `openai` service or `openai` compatible APIs. # # @option arguments [String] :task_type The type of the inference task that the model will perform. - # NOTE: The +chat_completion+ task type only supports streaming and only through the _stream API. (*Required*) + # NOTE: The `chat_completion` task type only supports streaming and only through the _stream API. (*Required*) # @option arguments [String] :openai_inference_id The unique identifier of the inference endpoint. (*Required*) # @option arguments [Hash] :headers Custom HTTP headers # @option arguments [Hash] :body request body diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_voyageai.rb b/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_voyageai.rb index 1143543838..9d1edd41f5 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_voyageai.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_voyageai.rb @@ -23,7 +23,7 @@ module API module Inference module Actions # Create a VoyageAI inference endpoint. - # Create an inference endpoint to perform an inference task with the +voyageai+ service. + # Create an inference endpoint to perform an inference task with the `voyageai` service. # Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. 
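As a rough illustration of how such an endpoint is created from the Ruby client (the endpoint id, credential, model name, and service settings below are assumptions, not values taken from this file):

    client.inference.put_voyageai(
      task_type: 'text_embedding',                  # assumed task type
      voyageai_inference_id: 'voyageai-embeddings', # hypothetical endpoint id
      body: {
        service: 'voyageai',
        service_settings: {
          api_key: ENV['VOYAGE_API_KEY'],           # hypothetical credential
          model_id: 'voyage-3-lite'                 # hypothetical model
        }
      }
    )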
# # @option arguments [String] :task_type The type of the inference task that the model will perform. (*Required*) diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_watsonx.rb b/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_watsonx.rb index c57f7f6099..9f200dfe99 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_watsonx.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/inference/put_watsonx.rb @@ -23,12 +23,12 @@ module API module Inference module Actions # Create a Watsonx inference endpoint. - # Create an inference endpoint to perform an inference task with the +watsonxai+ service. - # You need an IBM Cloud Databases for Elasticsearch deployment to use the +watsonxai+ inference service. + # Create an inference endpoint to perform an inference task with the `watsonxai` service. + # You need an IBM Cloud Databases for Elasticsearch deployment to use the `watsonxai` inference service. # You can provision one through the IBM catalog, the Cloud Databases CLI plug-in, the Cloud Databases API, or Terraform. # # @option arguments [String] :task_type The task type. - # The only valid task type for the model to perform is +text_embedding+. (*Required*) + # The only valid task type for the model to perform is `text_embedding`. (*Required*) # @option arguments [String] :watsonx_inference_id The unique identifier of the inference endpoint. (*Required*) # @option arguments [Hash] :headers Custom HTTP headers # @option arguments [Hash] :body request body diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/inference/stream_completion.rb b/elasticsearch-api/lib/elasticsearch/api/actions/inference/stream_completion.rb index dd3d03d63d..60ab32cffd 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/inference/stream_completion.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/inference/stream_completion.rb @@ -26,7 +26,7 @@ module Actions # Get real-time responses for completion tasks by delivering answers incrementally, reducing response times during computation. # This API works only with the completion task type. # IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. - # This API requires the +monitor_inference+ cluster privilege (the built-in +inference_admin+ and +inference_user+ roles grant this privilege). You must use a client that supports streaming. + # This API requires the `monitor_inference` cluster privilege (the built-in `inference_admin` and `inference_user` roles grant this privilege). You must use a client that supports streaming. # # @option arguments [String] :inference_id The unique identifier for the inference endpoint. 
(*Required*) # @option arguments [Hash] :headers Custom HTTP headers diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/inference/update.rb b/elasticsearch-api/lib/elasticsearch/api/actions/inference/update.rb index 76a0ffe88b..19382bd67d 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/inference/update.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/inference/update.rb @@ -23,7 +23,7 @@ module API module Inference module Actions # Update an inference endpoint. - # Modify +task_settings+, secrets (within +service_settings+), or +num_allocations+ for an inference endpoint, depending on the specific endpoint service and +task_type+. + # Modify `task_settings`, secrets (within `service_settings`), or `num_allocations` for an inference endpoint, depending on the specific endpoint service and `task_type`. # IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. # For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. # However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/ingest/delete_ip_location_database.rb b/elasticsearch-api/lib/elasticsearch/api/actions/ingest/delete_ip_location_database.rb index 572ce52327..9a9a44470d 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/ingest/delete_ip_location_database.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/ingest/delete_ip_location_database.rb @@ -27,10 +27,10 @@ module Actions # @option arguments [String, Array] :id A comma-separated list of IP location database configurations. (*Required*) # @option arguments [Time] :master_timeout The period to wait for a connection to the master node. # If no response is received before the timeout expires, the request fails and returns an error. - # A value of +-1+ indicates that the request should never time out. Server default: 30s. + # A value of `-1` indicates that the request should never time out. Server default: 30s. # @option arguments [Time] :timeout The period to wait for a response. # If no response is received before the timeout expires, the request fails and returns an error. - # A value of +-1+ indicates that the request should never time out. Server default: 30s. + # A value of `-1` indicates that the request should never time out. Server default: 30s. # @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-delete-ip-location-database diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/ingest/delete_pipeline.rb b/elasticsearch-api/lib/elasticsearch/api/actions/ingest/delete_pipeline.rb index 310cb3b0a9..26451c5df4 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/ingest/delete_pipeline.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/ingest/delete_pipeline.rb @@ -26,7 +26,7 @@ module Actions # Delete one or more ingest pipelines. # # @option arguments [String] :id Pipeline ID or wildcard expression of pipeline IDs used to limit the request. - # To delete all ingest pipelines in a cluster, use a value of +*+. (*Required*) + # To delete all ingest pipelines in a cluster, use a value of `*`. 
(*Required*) # @option arguments [Time] :master_timeout Period to wait for a connection to the master node. # If no response is received before the timeout expires, the request fails and returns an error. Server default: 30s. # @option arguments [Time] :timeout Period to wait for a response. diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/ingest/get_geoip_database.rb b/elasticsearch-api/lib/elasticsearch/api/actions/ingest/get_geoip_database.rb index 6094f39a00..803bfa32a5 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/ingest/get_geoip_database.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/ingest/get_geoip_database.rb @@ -26,8 +26,8 @@ module Actions # Get information about one or more IP geolocation database configurations. # # @option arguments [String, Array] :id A comma-separated list of database configuration IDs to retrieve. - # Wildcard (+*+) expressions are supported. - # To get all database configurations, omit this parameter or use +*+. + # Wildcard (`*`) expressions are supported. + # To get all database configurations, omit this parameter or use `*`. # @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-get-geoip-database diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/ingest/get_ip_location_database.rb b/elasticsearch-api/lib/elasticsearch/api/actions/ingest/get_ip_location_database.rb index bbaaca5404..50947b3eb1 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/ingest/get_ip_location_database.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/ingest/get_ip_location_database.rb @@ -25,11 +25,11 @@ module Actions # Get IP geolocation database configurations. # # @option arguments [String, Array] :id Comma-separated list of database configuration IDs to retrieve. - # Wildcard (+*+) expressions are supported. - # To get all database configurations, omit this parameter or use +*+. + # Wildcard (`*`) expressions are supported. + # To get all database configurations, omit this parameter or use `*`. # @option arguments [Time] :master_timeout The period to wait for a connection to the master node. # If no response is received before the timeout expires, the request fails and returns an error. - # A value of +-1+ indicates that the request should never time out. Server default: 30s. + # A value of `-1` indicates that the request should never time out. Server default: 30s. # @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-get-ip-location-database diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/ingest/get_pipeline.rb b/elasticsearch-api/lib/elasticsearch/api/actions/ingest/get_pipeline.rb index 48a86d8001..5320e4f16c 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/ingest/get_pipeline.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/ingest/get_pipeline.rb @@ -27,8 +27,8 @@ module Actions # This API returns a local reference of the pipeline. # # @option arguments [String] :id Comma-separated list of pipeline IDs to retrieve. - # Wildcard (+*+) expressions are supported. - # To get all ingest pipelines, omit this parameter or use +*+. + # Wildcard (`*`) expressions are supported. + # To get all ingest pipelines, omit this parameter or use `*`. # @option arguments [Time] :master_timeout Period to wait for a connection to the master node. 
# If no response is received before the timeout expires, the request fails and returns an error. Server default: 30s. # @option arguments [Boolean] :summary Return pipelines without their definitions (default: false) diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/ingest/put_ip_location_database.rb b/elasticsearch-api/lib/elasticsearch/api/actions/ingest/put_ip_location_database.rb index c8c25c27ea..cf3934190c 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/ingest/put_ip_location_database.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/ingest/put_ip_location_database.rb @@ -27,10 +27,10 @@ module Actions # @option arguments [String] :id The database configuration identifier. (*Required*) # @option arguments [Time] :master_timeout The period to wait for a connection to the master node. # If no response is received before the timeout expires, the request fails and returns an error. - # A value of +-1+ indicates that the request should never time out. Server default: 30s. + # A value of `-1` indicates that the request should never time out. Server default: 30s. # @option arguments [Time] :timeout The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. # If no response is received before the timeout expires, the cluster metadata update still applies but the response indicates that it was not completely acknowledged. - # A value of +-1+ indicates that the request should never time out. Server default: 30s. + # A value of `-1` indicates that the request should never time out. Server default: 30s. # @option arguments [Hash] :headers Custom HTTP headers # @option arguments [Hash] :body configuration # diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/ingest/simulate.rb b/elasticsearch-api/lib/elasticsearch/api/actions/ingest/simulate.rb index f07bdcc564..10cf75b3e8 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/ingest/simulate.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/ingest/simulate.rb @@ -27,8 +27,8 @@ module Actions # You can either specify an existing pipeline to use with the provided documents or supply a pipeline definition in the body of the request. # # @option arguments [String] :id The pipeline to test. - # If you don't specify a +pipeline+ in the request body, this parameter is required. - # @option arguments [Boolean] :verbose If +true+, the response includes output data for each processor in the executed pipeline. + # If you don't specify a `pipeline` in the request body, this parameter is required. + # @option arguments [Boolean] :verbose If `true`, the response includes output data for each processor in the executed pipeline. # @option arguments [Hash] :headers Custom HTTP headers # @option arguments [Hash] :body request body # diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/license/get.rb b/elasticsearch-api/lib/elasticsearch/api/actions/license/get.rb index 4dcaaed830..ed56db65ca 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/license/get.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/license/get.rb @@ -25,9 +25,9 @@ module Actions # Get license information. # Get information about your Elastic license including its type, its status, when it was issued, and when it expires. # - # @option arguments [Boolean] :accept_enterprise If +true+, this parameter returns enterprise for Enterprise license types. If +false+, this parameter returns platinum for both platinum and enterprise license types. 
This behavior is maintained for backwards compatibility. + # @option arguments [Boolean] :accept_enterprise If `true`, this parameter returns enterprise for Enterprise license types. If `false`, this parameter returns platinum for both platinum and enterprise license types. This behavior is maintained for backwards compatibility. # This parameter is deprecated and will always be set to true in 8.x. Server default: true. - # @option arguments [Boolean] :local Specifies whether to retrieve local information. The default value is +false+, which means the information is retrieved from the master node. + # @option arguments [Boolean] :local Specifies whether to retrieve local information. The default value is `false`, which means the information is retrieved from the master node. # @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-get diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/license/post_start_basic.rb b/elasticsearch-api/lib/elasticsearch/api/actions/license/post_start_basic.rb index 28662b0d8b..a99f54da12 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/license/post_start_basic.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/license/post_start_basic.rb @@ -26,7 +26,7 @@ module Actions # Start an indefinite basic license, which gives access to all the basic features. # NOTE: In order to start a basic license, you must not currently have a basic license. # If the basic license does not support all of the features that are available with your current license, however, you are notified in the response. - # You must then re-submit the API request with the +acknowledge+ parameter set to +true+. + # You must then re-submit the API request with the `acknowledge` parameter set to `true`. # To check the status of your basic license, use the get basic license API. # # @option arguments [Boolean] :acknowledge whether the user has acknowledged acknowledge messages (default: false) diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/close_job.rb b/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/close_job.rb index 386281093a..fc2e842a0a 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/close_job.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/close_job.rb @@ -28,11 +28,11 @@ module Actions # If you close an anomaly detection job whose datafeed is running, the request first tries to stop the datafeed. This behavior is equivalent to calling stop datafeed API with the same timeout and force parameters as the close job request. # When a datafeed that has a specified end date stops, it automatically closes its associated job. # - # @option arguments [String] :job_id Identifier for the anomaly detection job. It can be a job identifier, a group name, or a wildcard expression. You can close multiple anomaly detection jobs in a single API request by using a group name, a comma-separated list of jobs, or a wildcard expression. You can close all jobs by using +_all+ or by specifying +*+ as the job identifier. (*Required*) - # @option arguments [Boolean] :allow_no_match Specifies what to do when the request: contains wildcard expressions and there are no jobs that match; contains the +_all+ string or no identifiers and there are no matches; or contains wildcard expressions and there are only partial matches. 
By default, it returns an empty jobs array when there are no matches and the subset of results when there are partial matches. - # If +false+, the request returns a 404 status code when there are no matches or only partial matches. Server default: true. + # @option arguments [String] :job_id Identifier for the anomaly detection job. It can be a job identifier, a group name, or a wildcard expression. You can close multiple anomaly detection jobs in a single API request by using a group name, a comma-separated list of jobs, or a wildcard expression. You can close all jobs by using `_all` or by specifying `*` as the job identifier. (*Required*) + # @option arguments [Boolean] :allow_no_match Specifies what to do when the request: contains wildcard expressions and there are no jobs that match; contains the `_all` string or no identifiers and there are no matches; or contains wildcard expressions and there are only partial matches. By default, it returns an empty jobs array when there are no matches and the subset of results when there are partial matches. + # If `false`, the request returns a 404 status code when there are no matches or only partial matches. Server default: true. # @option arguments [Boolean] :force Use to close a failed job, or to forcefully close a job which has not responded to its initial close request; the request returns without performing the associated actions such as flushing buffers and persisting the model snapshots. - # If you want the job to be in a consistent state after the close job API returns, do not set to +true+. This parameter should be used only in situations where the job has already failed or where you are not interested in results the job might have recently produced or might produce in the future. + # If you want the job to be in a consistent state after the close job API returns, do not set to `true`. This parameter should be used only in situations where the job has already failed or where you are not interested in results the job might have recently produced or might produce in the future. # @option arguments [Time] :timeout Controls the time to wait until a job has closed. Server default: 30m. # @option arguments [Hash] :headers Custom HTTP headers # @option arguments [Hash] :body request body diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/delete_data_frame_analytics.rb b/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/delete_data_frame_analytics.rb index 337ec1b27e..ef696ba46c 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/delete_data_frame_analytics.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/delete_data_frame_analytics.rb @@ -25,7 +25,7 @@ module Actions # Delete a data frame analytics job. # # @option arguments [String] :id Identifier for the data frame analytics job. (*Required*) - # @option arguments [Boolean] :force If +true+, it deletes a job that is not stopped; this method is quicker than stopping and deleting the job. + # @option arguments [Boolean] :force If `true`, it deletes a job that is not stopped; this method is quicker than stopping and deleting the job. # @option arguments [Time] :timeout The time to wait for the job to be deleted. Server default: 1m. 
# @option arguments [Hash] :headers Custom HTTP headers # diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/delete_expired_data.rb b/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/delete_expired_data.rb index 884fff38e7..4b81026db9 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/delete_expired_data.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/delete_expired_data.rb @@ -29,8 +29,8 @@ module Actions # You can limit the request to a single or set of anomaly detection jobs by # using a job identifier, a group name, a comma-separated list of jobs, or a # wildcard expression. You can delete expired data for all anomaly detection - # jobs by using +_all+, by specifying +*+ as the ++, or by omitting the - # ++. + # jobs by using `_all`, by specifying `*` as the ``, or by omitting the + # ``. # # @option arguments [String] :job_id Identifier for an anomaly detection job. It can be a job identifier, a # group name, or a wildcard expression. diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/delete_forecast.rb b/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/delete_forecast.rb index 2642bab316..24656f34f4 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/delete_forecast.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/delete_forecast.rb @@ -24,16 +24,16 @@ module MachineLearning module Actions # Delete forecasts from a job. # By default, forecasts are retained for 14 days. You can specify a - # different retention period with the +expires_in+ parameter in the forecast + # different retention period with the `expires_in` parameter in the forecast # jobs API. The delete forecast API enables you to delete one or more # forecasts before they expire. # # @option arguments [String] :job_id Identifier for the anomaly detection job. (*Required*) # @option arguments [String] :forecast_id A comma-separated list of forecast identifiers. If you do not specify - # this optional parameter or if you specify +_all+ or +*+ the API deletes + # this optional parameter or if you specify `_all` or `*` the API deletes # all forecasts from the job. # @option arguments [Boolean] :allow_no_forecasts Specifies whether an error occurs when there are no forecasts. In - # particular, if this parameter is set to +false+ and there are no + # particular, if this parameter is set to `false` and there are no # forecasts associated with the job, attempts to delete all forecasts # return an error. Server default: true. # @option arguments [Time] :timeout Specifies the period of time to wait for the completion of the delete diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/delete_model_snapshot.rb b/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/delete_model_snapshot.rb index 3657140310..51a06280c8 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/delete_model_snapshot.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/delete_model_snapshot.rb @@ -25,7 +25,7 @@ module Actions # Delete a model snapshot. # You cannot delete the active model snapshot. To delete that snapshot, first # revert to a different one. To identify the active model snapshot, refer to - # the +model_snapshot_id+ in the results from the get jobs API. + # the `model_snapshot_id` in the results from the get jobs API. 
# # @option arguments [String] :job_id Identifier for the anomaly detection job. (*Required*) # @option arguments [String] :snapshot_id Identifier for the model snapshot. (*Required*) diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/delete_trained_model_alias.rb b/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/delete_trained_model_alias.rb index a8a279f57a..6b9bad8417 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/delete_trained_model_alias.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/delete_trained_model_alias.rb @@ -25,7 +25,7 @@ module Actions # Delete a trained model alias. # This API deletes an existing model alias that refers to a trained model. If # the model alias is missing or refers to a model other than the one identified - # by the +model_id+, this API returns an error. + # by the `model_id`, this API returns an error. # # @option arguments [String] :model_alias The model alias to delete. (*Required*) # @option arguments [String] :model_id The trained model ID to which the model alias refers. (*Required*) diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/flush_job.rb b/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/flush_job.rb index 2d76063a4a..5f6cca06ac 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/flush_job.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/flush_job.rb @@ -37,11 +37,11 @@ module Actions # and the model is updated for data from the specified time interval. # @option arguments [Boolean] :calc_interim If true, calculates the interim results for the most recent bucket or all # buckets within the latency period. - # @option arguments [String, Time] :end When used in conjunction with +calc_interim+ and +start+, specifies the + # @option arguments [String, Time] :end When used in conjunction with `calc_interim` and `start`, specifies the # range of buckets on which to calculate interim results. # @option arguments [String, Time] :skip_time Specifies to skip to a particular time value. Results are not generated # and the model is not updated for data from the specified time interval. - # @option arguments [String, Time] :start When used in conjunction with +calc_interim+, specifies the range of + # @option arguments [String, Time] :start When used in conjunction with `calc_interim`, specifies the range of # buckets on which to calculate interim results. # @option arguments [Hash] :headers Custom HTTP headers # @option arguments [Hash] :body request body diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/forecast.rb b/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/forecast.rb index b1cec0ba2c..6a445ab2da 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/forecast.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/forecast.rb @@ -25,13 +25,13 @@ module Actions # Predict future behavior of a time series. # Forecasts are not supported for jobs that perform population analysis; an # error occurs if you try to create a forecast for a job that has an - # +over_field_name+ in its configuration. Forcasts predict future behavior + # `over_field_name` in its configuration. Forecasts predict future behavior # based on historical data. # # @option arguments [String] :job_id Identifier for the anomaly detection job.
The job must be open when you # create a forecast; otherwise, an error occurs. (*Required*) # @option arguments [Time] :duration A period of time that indicates how far into the future to forecast. For - # example, +30d+ corresponds to 30 days. The forecast starts at the last + # example, `30d` corresponds to 30 days. The forecast starts at the last # record that was processed. Server default: 1d. # @option arguments [Time] :expires_in The period of time that forecast results are retained. After a forecast # expires, the results are deleted. If set to a value of 0, the forecast is diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/get_buckets.rb b/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/get_buckets.rb index e321e7d871..9de11ff5d6 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/get_buckets.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/get_buckets.rb @@ -29,15 +29,15 @@ module Actions # @option arguments [String, Time] :timestamp The timestamp of a single bucket result. If you do not specify this # parameter, the API returns information about all buckets. # @option arguments [Float] :anomaly_score Returns buckets with anomaly scores greater or equal than this value. Server default: 0. - # @option arguments [Boolean] :desc If +true+, the buckets are sorted in descending order. - # @option arguments [String, Time] :end Returns buckets with timestamps earlier than this time. +-1+ means it is + # @option arguments [Boolean] :desc If `true`, the buckets are sorted in descending order. + # @option arguments [String, Time] :end Returns buckets with timestamps earlier than this time. `-1` means it is # unset and results are not limited to specific timestamps. Server default: -1. - # @option arguments [Boolean] :exclude_interim If +true+, the output excludes interim results. + # @option arguments [Boolean] :exclude_interim If `true`, the output excludes interim results. # @option arguments [Boolean] :expand If true, the output includes anomaly records. # @option arguments [Integer] :from Skips the specified number of buckets. Server default: 0. # @option arguments [Integer] :size Specifies the maximum number of buckets to obtain. Server default: 100. # @option arguments [String] :sort Specifies the sort field for the requested buckets. Server default: timestamp. - # @option arguments [String, Time] :start Returns buckets with timestamps after this time. +-1+ means it is unset + # @option arguments [String, Time] :start Returns buckets with timestamps after this time. `-1` means it is unset # and results are not limited to specific timestamps. Server default: -1. # @option arguments [Hash] :headers Custom HTTP headers # @option arguments [Hash] :body request body diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/get_calendar_events.rb b/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/get_calendar_events.rb index 7251f54e40..78ab4886b4 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/get_calendar_events.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/get_calendar_events.rb @@ -24,10 +24,10 @@ module MachineLearning module Actions # Get info about events in calendars. # - # @option arguments [String] :calendar_id A string that uniquely identifies a calendar. You can get information for multiple calendars by using a comma-separated list of ids or a wildcard expression. 
You can get information for all calendars by using +_all+ or +*+ or by omitting the calendar identifier. (*Required*) + # @option arguments [String] :calendar_id A string that uniquely identifies a calendar. You can get information for multiple calendars by using a comma-separated list of ids or a wildcard expression. You can get information for all calendars by using `_all` or `*` or by omitting the calendar identifier. (*Required*) # @option arguments [String, Time] :end Specifies to get events with timestamps earlier than this time. # @option arguments [Integer] :from Skips the specified number of events. Server default: 0. - # @option arguments [String] :job_id Specifies to get events for a specific anomaly detection job identifier or job group. It must be used with a calendar identifier of +_all+ or +*+. + # @option arguments [String] :job_id Specifies to get events for a specific anomaly detection job identifier or job group. It must be used with a calendar identifier of `_all` or `*`. # @option arguments [Integer] :size Specifies the maximum number of events to obtain. Server default: 100. # @option arguments [String, Time] :start Specifies to get events with timestamps after this time. # @option arguments [Hash] :headers Custom HTTP headers diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/get_calendars.rb b/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/get_calendars.rb index 26d6c6e6b6..abe2f86895 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/get_calendars.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/get_calendars.rb @@ -24,7 +24,7 @@ module MachineLearning module Actions # Get calendar configuration info. # - # @option arguments [String] :calendar_id A string that uniquely identifies a calendar. You can get information for multiple calendars by using a comma-separated list of ids or a wildcard expression. You can get information for all calendars by using +_all+ or +*+ or by omitting the calendar identifier. + # @option arguments [String] :calendar_id A string that uniquely identifies a calendar. You can get information for multiple calendars by using a comma-separated list of ids or a wildcard expression. You can get information for all calendars by using `_all` or `*` or by omitting the calendar identifier. # @option arguments [Integer] :from Skips the specified number of calendars. This parameter is supported only when you omit the calendar identifier. Server default: 0. # @option arguments [Integer] :size Specifies the maximum number of calendars to obtain. This parameter is supported only when you omit the calendar identifier. Server default: 10000. # @option arguments [Hash] :headers Custom HTTP headers diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/get_data_frame_analytics.rb b/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/get_data_frame_analytics.rb index 2fcab59fbb..44ce6b80de 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/get_data_frame_analytics.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/get_data_frame_analytics.rb @@ -33,11 +33,11 @@ module Actions # @option arguments [Boolean] :allow_no_match Specifies what to do when the request: # - Contains wildcard expressions and there are no data frame analytics # jobs that match. - # - Contains the +_all+ string or no identifiers and there are no matches. 
+ # - Contains the `_all` string or no identifiers and there are no matches. # - Contains wildcard expressions and there are only partial matches. # The default value returns an empty data_frame_analytics array when there # are no matches and the subset of results when there are partial matches. - # If this parameter is +false+, the request returns a 404 status code when + # If this parameter is `false`, the request returns a 404 status code when # there are no matches or only partial matches. Server default: true. # @option arguments [Integer] :from Skips the specified number of data frame analytics jobs. Server default: 0. # @option arguments [Integer] :size Specifies the maximum number of data frame analytics jobs to obtain. Server default: 100. diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/get_data_frame_analytics_stats.rb b/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/get_data_frame_analytics_stats.rb index 20add65a34..0c41c48fa9 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/get_data_frame_analytics_stats.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/get_data_frame_analytics_stats.rb @@ -30,11 +30,11 @@ module Actions # @option arguments [Boolean] :allow_no_match Specifies what to do when the request: # - Contains wildcard expressions and there are no data frame analytics # jobs that match. - # - Contains the +_all+ string or no identifiers and there are no matches. + # - Contains the `_all` string or no identifiers and there are no matches. # - Contains wildcard expressions and there are only partial matches. # The default value returns an empty data_frame_analytics array when there # are no matches and the subset of results when there are partial matches. - # If this parameter is +false+, the request returns a 404 status code when + # If this parameter is `false`, the request returns a 404 status code when # there are no matches or only partial matches. Server default: true. # @option arguments [Integer] :from Skips the specified number of data frame analytics jobs. Server default: 0. # @option arguments [Integer] :size Specifies the maximum number of data frame analytics jobs to obtain. Server default: 100. diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/get_datafeed_stats.rb b/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/get_datafeed_stats.rb index 8c3d253217..e9eecc85cc 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/get_datafeed_stats.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/get_datafeed_stats.rb @@ -25,9 +25,9 @@ module Actions # Get datafeed stats. # You can get statistics for multiple datafeeds in a single API request by # using a comma-separated list of datafeeds or a wildcard expression. You can - # get statistics for all datafeeds by using +_all+, by specifying +*+ as the - # ++, or by omitting the ++. If the datafeed is stopped, the - # only information you receive is the +datafeed_id+ and the +state+. + # get statistics for all datafeeds by using `_all`, by specifying `*` as the + # ``, or by omitting the ``. If the datafeed is stopped, the + # only information you receive is the `datafeed_id` and the `state`. # This API returns a maximum of 10,000 datafeeds. # # @option arguments [String, Array] :datafeed_id Identifier for the datafeed. It can be a datafeed identifier or a @@ -35,12 +35,12 @@ module Actions # returns information about all datafeeds. 
# @option arguments [Boolean] :allow_no_match Specifies what to do when the request: # - Contains wildcard expressions and there are no datafeeds that match. - # - Contains the +_all+ string or no identifiers and there are no matches. + # - Contains the `_all` string or no identifiers and there are no matches. # - Contains wildcard expressions and there are only partial matches. - # The default value is +true+, which returns an empty +datafeeds+ array + # The default value is `true`, which returns an empty `datafeeds` array # when there are no matches and the subset of results when there are - # partial matches. If this parameter is +false+, the request returns a - # +404+ status code when there are no matches or only partial matches. + # partial matches. If this parameter is `false`, the request returns a + # `404` status code when there are no matches or only partial matches. # @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-datafeed-stats diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/get_datafeeds.rb b/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/get_datafeeds.rb index 21849047cf..42902da6b3 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/get_datafeeds.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/get_datafeeds.rb @@ -25,8 +25,8 @@ module Actions # Get datafeeds configuration info. # You can get information for multiple datafeeds in a single API request by # using a comma-separated list of datafeeds or a wildcard expression. You can - # get information for all datafeeds by using +_all+, by specifying +*+ as the - # ++, or by omitting the ++. + # get information for all datafeeds by using `_all`, by specifying `*` as the + # ``, or by omitting the ``. # This API returns a maximum of 10,000 datafeeds. # # @option arguments [String, Array] :datafeed_id Identifier for the datafeed. It can be a datafeed identifier or a @@ -34,12 +34,12 @@ module Actions # returns information about all datafeeds. # @option arguments [Boolean] :allow_no_match Specifies what to do when the request: # - Contains wildcard expressions and there are no datafeeds that match. - # - Contains the +_all+ string or no identifiers and there are no matches. + # - Contains the `_all` string or no identifiers and there are no matches. # - Contains wildcard expressions and there are only partial matches. - # The default value is +true+, which returns an empty +datafeeds+ array + # The default value is `true`, which returns an empty `datafeeds` array # when there are no matches and the subset of results when there are - # partial matches. If this parameter is +false+, the request returns a - # +404+ status code when there are no matches or only partial matches. + # partial matches. If this parameter is `false`, the request returns a + # `404` status code when there are no matches or only partial matches. # @option arguments [Boolean] :exclude_generated Indicates if certain fields should be removed from the configuration on # retrieval. This allows the configuration to be in an acceptable format to # be retrieved and then added to another cluster. 
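A companion sketch for the configuration endpoint, under the same assumptions; `exclude_generated` is the option to reach for when the returned configuration should be re-importable on another cluster.

```
# Configuration for all datafeeds, stripped of generated fields.
client.machine_learning.get_datafeeds(datafeed_id: '_all', exclude_generated: true)
```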
diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/get_influencers.rb b/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/get_influencers.rb index 79245406fa..527e523d13 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/get_influencers.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/get_influencers.rb @@ -25,7 +25,7 @@ module Actions # Get anomaly detection job results for influencers. # Influencers are the entities that have contributed to, or are to blame for, # the anomalies. Influencer results are available only if an - # +influencer_field_name+ is specified in the job configuration. + # `influencer_field_name` is specified in the job configuration. # # @option arguments [String] :job_id Identifier for the anomaly detection job. (*Required*) # @option arguments [Boolean] :desc If true, the results are sorted in descending order. @@ -39,7 +39,7 @@ module Actions # @option arguments [Integer] :from Skips the specified number of influencers. Server default: 0. # @option arguments [Integer] :size Specifies the maximum number of influencers to obtain. Server default: 100. # @option arguments [String] :sort Specifies the sort field for the requested influencers. By default, the - # influencers are sorted by the +influencer_score+ value. + # influencers are sorted by the `influencer_score` value. # @option arguments [String, Time] :start Returns influencers with timestamps after this time. The default value # means it is unset and results are not limited to specific timestamps. Server default: -1. # @option arguments [Hash] :headers Custom HTTP headers diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/get_job_stats.rb b/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/get_job_stats.rb index f85a689f0f..7c5c5ad871 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/get_job_stats.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/get_job_stats.rb @@ -32,9 +32,9 @@ module Actions # - Contains wildcard expressions and there are no jobs that match. # - Contains the _all string or no identifiers and there are no matches. # - Contains wildcard expressions and there are only partial matches. - # If +true+, the API returns an empty +jobs+ array when + # If `true`, the API returns an empty `jobs` array when # there are no matches and the subset of results when there are partial - # matches. If +false+, the API returns a +404+ status + # matches. If `false`, the API returns a `404` status # code when there are no matches or only partial matches. Server default: true. # @option arguments [Hash] :headers Custom HTTP headers # diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/get_jobs.rb b/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/get_jobs.rb index b59b9ecd72..6e7bfe0e3e 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/get_jobs.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/get_jobs.rb @@ -26,7 +26,7 @@ module Actions # You can get information for multiple anomaly detection jobs in a single API # request by using a group name, a comma-separated list of jobs, or a wildcard # expression. You can get information for all anomaly detection jobs by using - # +_all+, by specifying +*+ as the ++, or by omitting the ++. + # `_all`, by specifying `*` as the ``, or by omitting the ``. 
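A hypothetical call for the influencer results described above, assuming the job was configured with an `influencer_field_name`:

```
# Ten highest-scoring influencers for a placeholder job, sorted by influencer_score.
client.machine_learning.get_influencers(
  job_id: 'my-anomaly-job',
  sort: 'influencer_score',
  desc: true,
  size: 10
)
```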
# # @option arguments [String, Array] :job_id Identifier for the anomaly detection job. It can be a job identifier, a # group name, or a wildcard expression. If you do not specify one of these @@ -35,9 +35,9 @@ module Actions # - Contains wildcard expressions and there are no jobs that match. # - Contains the _all string or no identifiers and there are no matches. # - Contains wildcard expressions and there are only partial matches. - # The default value is +true+, which returns an empty +jobs+ array when + # The default value is `true`, which returns an empty `jobs` array when # there are no matches and the subset of results when there are partial - # matches. If this parameter is +false+, the request returns a +404+ status + # matches. If this parameter is `false`, the request returns a `404` status # code when there are no matches or only partial matches. Server default: true. # @option arguments [Boolean] :exclude_generated Indicates if certain fields should be removed from the configuration on # retrieval. This allows the configuration to be in an acceptable format to diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/get_memory_stats.rb b/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/get_memory_stats.rb index 13f5bb4b9a..9916c747ef 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/get_memory_stats.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/get_memory_stats.rb @@ -26,8 +26,8 @@ module Actions # Get information about how machine learning jobs and trained models are using memory, # on each node, both within the JVM heap, and natively, outside of the JVM. # - # @option arguments [String] :node_id The names of particular nodes in the cluster to target. For example, +nodeId1,nodeId2+ or - # +ml:true+ + # @option arguments [String] :node_id The names of particular nodes in the cluster to target. For example, `nodeId1,nodeId2` or + # `ml:true` # @option arguments [Time] :master_timeout Period to wait for a connection to the master node. If no response is received before the timeout # expires, the request fails and returns an error. Server default: 30s. # @option arguments [Time] :timeout Period to wait for a response. If no response is received before the timeout expires, the request diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/get_model_snapshot_upgrade_stats.rb b/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/get_model_snapshot_upgrade_stats.rb index 2515be5b54..3824a7dd9b 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/get_model_snapshot_upgrade_stats.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/get_model_snapshot_upgrade_stats.rb @@ -26,8 +26,8 @@ module Actions # # @option arguments [String] :job_id Identifier for the anomaly detection job. (*Required*) # @option arguments [String] :snapshot_id A numerical character string that uniquely identifies the model snapshot. You can get information for multiple - # snapshots by using a comma-separated list or a wildcard expression. You can get all snapshots by using +_all+, - # by specifying +*+ as the snapshot ID, or by omitting the snapshot ID. (*Required*) + # snapshots by using a comma-separated list or a wildcard expression. You can get all snapshots by using `_all`, + # by specifying `*` as the snapshot ID, or by omitting the snapshot ID. 
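A quick sketch of the memory stats call, using the `ml:true` node filter mentioned above (values are placeholders):

```
# Memory usage of ML jobs and trained models, limited to ML nodes.
client.machine_learning.get_memory_stats(node_id: 'ml:true', master_timeout: '30s')
```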
(*Required*) # @option arguments [Boolean] :allow_no_match Specifies what to do when the request: # - Contains wildcard expressions and there are no jobs that match. # - Contains the _all string or no identifiers and there are no matches. diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/get_model_snapshots.rb b/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/get_model_snapshots.rb index 41efb6ca22..96a8f9434a 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/get_model_snapshots.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/get_model_snapshots.rb @@ -26,8 +26,8 @@ module Actions # # @option arguments [String] :job_id Identifier for the anomaly detection job. (*Required*) # @option arguments [String] :snapshot_id A numerical character string that uniquely identifies the model snapshot. You can get information for multiple - # snapshots by using a comma-separated list or a wildcard expression. You can get all snapshots by using +_all+, - # by specifying +*+ as the snapshot ID, or by omitting the snapshot ID. + # snapshots by using a comma-separated list or a wildcard expression. You can get all snapshots by using `_all`, + # by specifying `*` as the snapshot ID, or by omitting the snapshot ID. # @option arguments [Boolean] :desc If true, the results are sorted in descending order. # @option arguments [String, Time] :end Returns snapshots with timestamps earlier than this time. # @option arguments [Integer] :from Skips the specified number of snapshots. Server default: 0. diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/get_overall_buckets.rb b/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/get_overall_buckets.rb index 5ad6d621f5..ea1c058c7c 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/get_overall_buckets.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/get_overall_buckets.rb @@ -25,45 +25,45 @@ module Actions # Get overall bucket results. # Retrievs overall bucket results that summarize the bucket results of # multiple anomaly detection jobs. - # The +overall_score+ is calculated by combining the scores of all the + # The `overall_score` is calculated by combining the scores of all the # buckets within the overall bucket span. First, the maximum - # +anomaly_score+ per anomaly detection job in the overall bucket is - # calculated. Then the +top_n+ of those scores are averaged to result in - # the +overall_score+. This means that you can fine-tune the - # +overall_score+ so that it is more or less sensitive to the number of + # `anomaly_score` per anomaly detection job in the overall bucket is + # calculated. Then the `top_n` of those scores are averaged to result in + # the `overall_score`. This means that you can fine-tune the + # `overall_score` so that it is more or less sensitive to the number of # jobs that detect an anomaly at the same time. For example, if you set - # +top_n+ to +1+, the +overall_score+ is the maximum bucket score in the - # overall bucket. Alternatively, if you set +top_n+ to the number of jobs, - # the +overall_score+ is high only when all jobs detect anomalies in that - # overall bucket. 
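For the snapshot listing above, a minimal sketch; the job identifier is a placeholder, and passing `_all` behaves the same as omitting the snapshot ID.

```
# Newest snapshots first for one job.
client.machine_learning.get_model_snapshots(
  job_id: 'my-anomaly-job',
  snapshot_id: '_all',
  desc: true
)
```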
If you set the +bucket_span+ parameter (to a value - # greater than its default), the +overall_score+ is the maximum - # +overall_score+ of the overall buckets that have a span equal to the + # `top_n` to `1`, the `overall_score` is the maximum bucket score in the + # overall bucket. Alternatively, if you set `top_n` to the number of jobs, + # the `overall_score` is high only when all jobs detect anomalies in that + # overall bucket. If you set the `bucket_span` parameter (to a value + # greater than its default), the `overall_score` is the maximum + # `overall_score` of the overall buckets that have a span equal to the # jobs' largest bucket span. # # @option arguments [String] :job_id Identifier for the anomaly detection job. It can be a job identifier, a # group name, a comma-separated list of jobs or groups, or a wildcard # expression.You can summarize the bucket results for all anomaly detection jobs by - # using +_all+ or by specifying +*+ as the ++. (*Required*) + # using `_all` or by specifying `*` as the ``. (*Required*) # @option arguments [Boolean] :allow_no_match Specifies what to do when the request: # - Contains wildcard expressions and there are no jobs that match. - # - Contains the +_all+ string or no identifiers and there are no matches. + # - Contains the `_all` string or no identifiers and there are no matches. # - Contains wildcard expressions and there are only partial matches. - # If +true+, the request returns an empty +jobs+ array when there are no + # If `true`, the request returns an empty `jobs` array when there are no # matches and the subset of results when there are partial matches. If this - # parameter is +false+, the request returns a +404+ status code when there + # parameter is `false`, the request returns a `404` status code when there # are no matches or only partial matches. Server default: true. # @option arguments [Time] :bucket_span The span of the overall buckets. Must be greater or equal to the largest # bucket span of the specified anomaly detection jobs, which is the default # value.By default, an overall bucket has a span equal to the largest bucket span # of the specified anomaly detection jobs. To override that behavior, use - # the optional +bucket_span+ parameter. + # the optional `bucket_span` parameter. # @option arguments [String, Time] :end Returns overall buckets with timestamps earlier than this time. - # @option arguments [Boolean] :exclude_interim If +true+, the output excludes interim results. + # @option arguments [Boolean] :exclude_interim If `true`, the output excludes interim results. # @option arguments [Double, String] :overall_score Returns overall buckets with overall scores greater than or equal to this # value. # @option arguments [String, Time] :start Returns overall buckets with timestamps after this time. # @option arguments [Integer] :top_n The number of top anomaly detection job bucket scores to be used in the - # +overall_score+ calculation. Server default: 1. + # `overall_score` calculation. Server default: 1. 
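A sketch of how `top_n` and `bucket_span` shape the `overall_score`, using placeholder values:

```
# With top_n: 2 the overall_score averages the two highest per-job scores in each
# overall bucket; bucket_span widens the buckets beyond the jobs' largest span.
client.machine_learning.get_overall_buckets(
  job_id: '_all',
  top_n: 2,
  bucket_span: '1h',
  exclude_interim: true
)
```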
# @option arguments [Hash] :headers Custom HTTP headers # @option arguments [Hash] :body request body # diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/get_records.rb b/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/get_records.rb index b80c6bca74..cd51f305d4 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/get_records.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/get_records.rb @@ -38,7 +38,7 @@ module Actions # @option arguments [Boolean] :desc If true, the results are sorted in descending order. # @option arguments [String, Time] :end Returns records with timestamps earlier than this time. The default value # means results are not limited to specific timestamps. Server default: -1. -      # @option arguments [Boolean] :exclude_interim If +true+, the output excludes interim results. +      # @option arguments [Boolean] :exclude_interim If `true`, the output excludes interim results. # @option arguments [Integer] :from Skips the specified number of records. Server default: 0. # @option arguments [Float] :record_score Returns records with anomaly scores greater or equal than this value. Server default: 0. # @option arguments [Integer] :size Specifies the maximum number of records to obtain. Server default: 100. diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/put_data_frame_analytics.rb b/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/put_data_frame_analytics.rb index 48798ae7fd..8e57eb02e2 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/put_data_frame_analytics.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/put_data_frame_analytics.rb @@ -25,7 +25,7 @@ module Actions # Create a data frame analytics job. # This API creates a data frame analytics job that performs an analysis on the # source indices and stores the outcome in a destination index. -      # By default, the query used in the source configuration is +{"match_all": {}}+. +      # By default, the query used in the source configuration is `{"match_all": {}}`. # If the destination index does not exist, it is created automatically when you start the job. # If you supply only a subset of the regression or classification parameters, hyperparameter optimization occurs. It determines a value for each of the undefined parameters. # diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/put_datafeed.rb b/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/put_datafeed.rb index 06c1376ebf..58a946d310 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/put_datafeed.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/put_datafeed.rb @@ -25,19 +25,27 @@ module Actions # Create a datafeed. # Datafeeds retrieve data from Elasticsearch for analysis by an anomaly detection job. # You can associate only one datafeed with each anomaly detection job. -      # The datafeed contains a query that runs at a defined interval (+frequency+). -      # If you are concerned about delayed data, you can add a delay (+query_delay') at each interval. -      # By default, the datafeed uses the following query:+{"match_all": {"boost": 1}}+. +      # The datafeed contains a query that runs at a defined interval (`frequency`). +      # If you are concerned about delayed data, you can add a delay (`query_delay`) at each interval.
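A minimal outlier-detection job for the API described above; the index names are placeholders and the source query is left at its `{"match_all": {}}` default.

```
client.machine_learning.put_data_frame_analytics(
  id: 'my-outlier-job',
  body: {
    source: { index: 'my-source-index' },
    dest: { index: 'my-outlier-results' },
    analysis: { outlier_detection: {} }
  }
)
```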
+      # By default, the datafeed uses the following query: `{"match_all": {"boost": 1}}`. +      # When Elasticsearch security features are enabled, your datafeed remembers which roles the user who created it had +      # at the time of creation and runs the query using those same roles. If you provide secondary authorization headers, +      # those credentials are used instead. +      # You must use Kibana, this API, or the create anomaly detection jobs API to create a datafeed. Do not add a datafeed -      # directly to the+.ml-config+index. Do not give users+write+privileges on the+.ml-config` index. +      # directly to the `.ml-config` index. Do not give users `write` privileges on the `.ml-config` index. # # @option arguments [String] :datafeed_id A numerical character string that uniquely identifies the datafeed. # This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. # It must start and end with alphanumeric characters. (*Required*) -      # @option arguments [Boolean] :allow_no_indices If true, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the +_all+ +      # @option arguments [Boolean] :allow_no_indices If true, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the `_all` # string or when no indices are specified. Server default: true. # @option arguments [String, Array] :expand_wildcards Type of index that wildcard patterns can match. If the request can target data streams, this argument determines # whether wildcard expressions match hidden data streams. Supports comma-separated values. Server default: open. diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/put_filter.rb b/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/put_filter.rb index e2389b083b..a66bb69550 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/put_filter.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/put_filter.rb @@ -24,7 +24,7 @@ module MachineLearning module Actions # Create a filter. # A filter contains a list of strings. It can be used by one or more anomaly detection jobs. -      # Specifically, filters are referenced in the +custom_rules+ property of detector configuration objects. +      # Specifically, filters are referenced in the `custom_rules` property of detector configuration objects. # # @option arguments [String] :filter_id A string that uniquely identifies a filter. (*Required*) # @option arguments [Hash] :headers Custom HTTP headers diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/put_job.rb b/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/put_job.rb index ad5960bfa4..6553fc1e47 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/put_job.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/put_job.rb @@ -23,21 +23,21 @@ module API module MachineLearning module Actions # Create an anomaly detection job. -      # If you include a +datafeed_config+, you must have read index privileges on the source index. -      # If you include a +datafeed_config+ but do not provide a query, the datafeed uses +{"match_all": {"boost": 1}}+. +      # If you include a `datafeed_config`, you must have read index privileges on the source index. +      # If you include a `datafeed_config` but do not provide a query, the datafeed uses `{"match_all": {"boost": 1}}`.
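A sketch of the datafeed creation call with the `frequency` and `query_delay` knobs mentioned above; identifiers and intervals are placeholders.

```
client.machine_learning.put_datafeed(
  datafeed_id: 'datafeed-my-anomaly-job',
  body: {
    job_id: 'my-anomaly-job',
    indices: ['my-metrics-*'],
    query: { match_all: { boost: 1 } }, # the default when no query is given
    frequency: '150s',
    query_delay: '60s'                  # allow for late-arriving data
  }
)
```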
# # @option arguments [String] :job_id The identifier for the anomaly detection job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. (*Required*) - # @option arguments [Boolean] :allow_no_indices If +true+, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the - # +_all+ string or when no indices are specified. Server default: true. + # @option arguments [Boolean] :allow_no_indices If `true`, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the + # `_all` string or when no indices are specified. Server default: true. # @option arguments [String, Array] :expand_wildcards Type of index that wildcard patterns can match. If the request can target data streams, this argument determines # whether wildcard expressions match hidden data streams. Supports comma-separated values. Valid values are: - # - +all+: Match any data stream or index, including hidden ones. - # - +closed+: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed. - # - +hidden+: Match hidden data streams and hidden indices. Must be combined with +open+, +closed+, or both. - # - +none+: Wildcard patterns are not accepted. - # - +open+: Match open, non-hidden indices. Also matches any non-hidden data stream. Server default: open. - # @option arguments [Boolean] :ignore_throttled If +true+, concrete, expanded or aliased indices are ignored when frozen. Server default: true. - # @option arguments [Boolean] :ignore_unavailable If +true+, unavailable indices (missing or closed) are ignored. + # - `all`: Match any data stream or index, including hidden ones. + # - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed. + # - `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or both. + # - `none`: Wildcard patterns are not accepted. + # - `open`: Match open, non-hidden indices. Also matches any non-hidden data stream. Server default: open. + # @option arguments [Boolean] :ignore_throttled If `true`, concrete, expanded or aliased indices are ignored when frozen. Server default: true. + # @option arguments [Boolean] :ignore_unavailable If `true`, unavailable indices (missing or closed) are ignored. # @option arguments [Hash] :headers Custom HTTP headers # @option arguments [Hash] :body request body # diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/put_trained_model.rb b/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/put_trained_model.rb index 2cefb4921b..79a4d38407 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/put_trained_model.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/put_trained_model.rb @@ -26,7 +26,7 @@ module Actions # Enable you to supply a trained model that is not created by data frame analytics. # # @option arguments [String] :model_id The unique identifier of the trained model. (*Required*) - # @option arguments [Boolean] :defer_definition_decompression If set to +true+ and a +compressed_definition+ is provided, + # @option arguments [Boolean] :defer_definition_decompression If set to `true` and a `compressed_definition` is provided, # the request defers definition decompression and skips relevant # validations. 
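And the matching anomaly detection job, sketched with a single metric detector (field names are placeholders):

```
client.machine_learning.put_job(
  job_id: 'my-anomaly-job',
  body: {
    analysis_config: {
      bucket_span: '15m',
      detectors: [{ function: 'mean', field_name: 'responsetime' }]
    },
    data_description: { time_field: '@timestamp' }
  }
)
```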
# @option arguments [Boolean] :wait_for_completion Whether to wait for all child operations (e.g. model download) diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/put_trained_model_definition_part.rb b/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/put_trained_model_definition_part.rb index 86b99c93c0..f6a9baca58 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/put_trained_model_definition_part.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/put_trained_model_definition_part.rb @@ -26,7 +26,7 @@ module Actions # # @option arguments [String] :model_id The unique identifier of the trained model. (*Required*) # @option arguments [Integer] :part The definition part number. When the definition is loaded for inference the definition parts are streamed in the - # order of their part number. The first part must be +0+ and the final part must be +total_parts - 1+. (*Required*) + # order of their part number. The first part must be `0` and the final part must be `total_parts - 1`. (*Required*) # @option arguments [Hash] :headers Custom HTTP headers # @option arguments [Hash] :body request body # diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/put_trained_model_vocabulary.rb b/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/put_trained_model_vocabulary.rb index 857bb3c395..14c168fb91 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/put_trained_model_vocabulary.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/put_trained_model_vocabulary.rb @@ -24,7 +24,7 @@ module MachineLearning module Actions # Create a trained model vocabulary. # This API is supported only for natural language processing (NLP) models. - # The vocabulary is stored in the index as described in +inference_config.*.vocabulary+ of the trained model definition. + # The vocabulary is stored in the index as described in `inference_config.*.vocabulary` of the trained model definition. # # @option arguments [String] :model_id The unique identifier of the trained model. (*Required*) # @option arguments [Hash] :headers Custom HTTP headers diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/revert_model_snapshot.rb b/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/revert_model_snapshot.rb index f89e841e8b..309417c257 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/revert_model_snapshot.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/revert_model_snapshot.rb @@ -32,7 +32,7 @@ module Actions # snapshot after Black Friday or a critical system failure. # # @option arguments [String] :job_id Identifier for the anomaly detection job. (*Required*) - # @option arguments [String] :snapshot_id You can specify +empty+ as the . Reverting to the empty + # @option arguments [String] :snapshot_id You can specify `empty` as the . Reverting to the empty # snapshot means the anomaly detection job starts learning a new model from # scratch when it is started. 
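For the numbered definition parts described above, a rough sketch; the chunk contents, lengths, and model ID are placeholders, and the body field names are assumed from the trained-model APIs.

```
base64_chunks = ['...', '...'] # base64-encoded pieces of the compressed definition
base64_chunks.each_with_index do |chunk, part_number|
  client.machine_learning.put_trained_model_definition_part(
    model_id: 'my-trained-model',
    part: part_number, # the first part is 0 and the final part is total_parts - 1
    body: {
      definition: chunk,
      total_definition_length: 123_456,
      total_parts: base64_chunks.length
    }
  )
end
```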
(*Required*) # @option arguments [Boolean] :delete_intervening_results If true, deletes the results in the time period between the latest diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/set_upgrade_mode.rb b/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/set_upgrade_mode.rb index 6466ca4643..9295430882 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/set_upgrade_mode.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/set_upgrade_mode.rb @@ -36,7 +36,7 @@ module Actions # You can see the current value for the upgrade_mode setting by using the get # machine learning info API. # - # @option arguments [Boolean] :enabled When +true+, it enables +upgrade_mode+ which temporarily halts all job + # @option arguments [Boolean] :enabled When `true`, it enables `upgrade_mode` which temporarily halts all job # and datafeed tasks and prohibits new job and datafeed tasks from # starting. # @option arguments [Time] :timeout The time to wait for the request to be completed. Server default: 30s. diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/start_data_frame_analytics.rb b/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/start_data_frame_analytics.rb index cfc856a952..2466344c74 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/start_data_frame_analytics.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/start_data_frame_analytics.rb @@ -27,7 +27,7 @@ module Actions # throughout its lifecycle. # If the destination index does not exist, it is created automatically the # first time you start the data frame analytics job. The - # +index.number_of_shards+ and +index.number_of_replicas+ settings for the + # `index.number_of_shards` and `index.number_of_replicas` settings for the # destination index are copied from the source index. If there are multiple # source indices, the destination index copies the highest setting values. The # mappings for the destination index are also copied from the source indices. diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/start_datafeed.rb b/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/start_datafeed.rb index 315f2f7137..47040b9740 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/start_datafeed.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/start_datafeed.rb @@ -36,15 +36,15 @@ module Actions # alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric # characters. (*Required*) # @option arguments [String, Time] :end The time that the datafeed should end, which can be specified by using one of the following formats: - # - ISO 8601 format with milliseconds, for example +2017-01-22T06:00:00.000Z+ - # - ISO 8601 format without milliseconds, for example +2017-01-22T06:00:00+00:00+ - # - Milliseconds since the epoch, for example +1485061200000+ - # Date-time arguments using either of the ISO 8601 formats must have a time zone designator, where +Z+ is accepted - # as an abbreviation for UTC time. When a URL is expected (for example, in browsers), the +++ used in time zone - # designators must be encoded as +%2B+. 
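A sketch of reverting to the special `empty` snapshot, as described above:

```
# Start learning a fresh model and drop results recorded after the snapshot.
client.machine_learning.revert_model_snapshot(
  job_id: 'my-anomaly-job',
  snapshot_id: 'empty',
  delete_intervening_results: true
)
```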
+ # - ISO 8601 format with milliseconds, for example `2017-01-22T06:00:00.000Z` + # - ISO 8601 format without milliseconds, for example `2017-01-22T06:00:00+00:00` + # - Milliseconds since the epoch, for example `1485061200000` + # Date-time arguments using either of the ISO 8601 formats must have a time zone designator, where `Z` is accepted + # as an abbreviation for UTC time. When a URL is expected (for example, in browsers), the `+` used in time zone + # designators must be encoded as `%2B`. # The end time value is exclusive. If you do not specify an end time, the datafeed # runs continuously. - # @option arguments [String, Time] :start The time that the datafeed should begin, which can be specified by using the same formats as the +end+ parameter. + # @option arguments [String, Time] :start The time that the datafeed should begin, which can be specified by using the same formats as the `end` parameter. # This value is inclusive. # If you do not specify a start time and the datafeed is associated with a new anomaly detection job, the analysis # starts from the earliest time for which data is available. diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/start_trained_model_deployment.rb b/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/start_trained_model_deployment.rb index 6af12449bb..f01107ab57 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/start_trained_model_deployment.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/start_trained_model_deployment.rb @@ -27,8 +27,8 @@ module Actions # # @option arguments [String] :model_id The unique identifier of the trained model. Currently, only PyTorch models are supported. (*Required*) # @option arguments [Integer, String] :cache_size The inference cache size (in memory outside the JVM heap) per node for the model. - # The default value is the same size as the +model_size_bytes+. To disable the cache, - # +0b+ can be provided. + # The default value is the same size as the `model_size_bytes`. To disable the cache, + # `0b` can be provided. # @option arguments [String] :deployment_id A unique identifier for the deployment of the model. # @option arguments [Integer] :number_of_allocations The number of model allocations on each node where the model is deployed. # All allocations on a node share the same copy of the model in memory but use diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/stop_datafeed.rb b/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/stop_datafeed.rb index 60907e993f..b000921527 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/stop_datafeed.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/stop_datafeed.rb @@ -27,16 +27,16 @@ module Actions # multiple times throughout its lifecycle. # # @option arguments [String] :datafeed_id Identifier for the datafeed. You can stop multiple datafeeds in a single API request by using a comma-separated - # list of datafeeds or a wildcard expression. You can close all datafeeds by using +_all+ or by specifying +*+ as + # list of datafeeds or a wildcard expression. You can close all datafeeds by using `_all` or by specifying `*` as # the identifier. (*Required*) # @option arguments [Boolean] :allow_no_match Specifies what to do when the request: # - Contains wildcard expressions and there are no datafeeds that match. - # - Contains the +_all+ string or no identifiers and there are no matches. 
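A sketch of starting a datafeed over a bounded window using the ISO 8601 formats listed above (the timestamps are placeholders):

```
client.machine_learning.start_datafeed(
  datafeed_id: 'datafeed-my-anomaly-job',
  start: '2017-01-22T06:00:00.000Z', # inclusive
  end: '2017-01-23T06:00:00.000Z'    # exclusive; omit to run continuously
)
```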
+ # - Contains the `_all` string or no identifiers and there are no matches. # - Contains wildcard expressions and there are only partial matches. - # If +true+, the API returns an empty datafeeds array when there are no matches and the subset of results when - # there are partial matches. If +false+, the API returns a 404 status code when there are no matches or only + # If `true`, the API returns an empty datafeeds array when there are no matches and the subset of results when + # there are partial matches. If `false`, the API returns a 404 status code when there are no matches or only # partial matches. Server default: true. - # @option arguments [Boolean] :force If +true+, the datafeed is stopped forcefully. + # @option arguments [Boolean] :force If `true`, the datafeed is stopped forcefully. # @option arguments [Time] :timeout Specifies the amount of time to wait until a datafeed stops. Server default: 20s. # @option arguments [Hash] :headers Custom HTTP headers # @option arguments [Hash] :body request body diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/stop_trained_model_deployment.rb b/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/stop_trained_model_deployment.rb index 9316c25ad1..207a993253 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/stop_trained_model_deployment.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/stop_trained_model_deployment.rb @@ -26,9 +26,9 @@ module Actions # # @option arguments [String] :model_id The unique identifier of the trained model. (*Required*) # @option arguments [Boolean] :allow_no_match Specifies what to do when the request: contains wildcard expressions and there are no deployments that match; - # contains the +_all+ string or no identifiers and there are no matches; or contains wildcard expressions and + # contains the `_all` string or no identifiers and there are no matches; or contains wildcard expressions and # there are only partial matches. By default, it returns an empty array when there are no matches and the subset of results when there are partial matches. - # If +false+, the request returns a 404 status code when there are no matches or only partial matches. Server default: true. + # If `false`, the request returns a 404 status code when there are no matches or only partial matches. Server default: true. # @option arguments [Boolean] :force Forcefully stops the deployment, even if it is used by ingest pipelines. You can't use these pipelines until you # restart the model deployment. # @option arguments [Hash] :headers Custom HTTP headers diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/update_datafeed.rb b/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/update_datafeed.rb index 85074eb75a..e17a036e0b 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/update_datafeed.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/machine_learning/update_datafeed.rb @@ -31,17 +31,17 @@ module Actions # @option arguments [String] :datafeed_id A numerical character string that uniquely identifies the datafeed. # This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. # It must start and end with alphanumeric characters. (*Required*) - # @option arguments [Boolean] :allow_no_indices If +true+, wildcard indices expressions that resolve into no concrete indices are ignored. 
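The matching stop call, sketched for both the graceful and the forced case:

```
client.machine_learning.stop_datafeed(datafeed_id: '_all', timeout: '20s')
client.machine_learning.stop_datafeed(datafeed_id: 'datafeed-my-anomaly-job', force: true)
```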
This includes the - # +_all+ string or when no indices are specified. Server default: true. + # @option arguments [Boolean] :allow_no_indices If `true`, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the + # `_all` string or when no indices are specified. Server default: true. # @option arguments [String, Array] :expand_wildcards Type of index that wildcard patterns can match. If the request can target data streams, this argument determines # whether wildcard expressions match hidden data streams. Supports comma-separated values. Valid values are: - # - +all+: Match any data stream or index, including hidden ones. - # - +closed+: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed. - # - +hidden+: Match hidden data streams and hidden indices. Must be combined with +open+, +closed+, or both. - # - +none+: Wildcard patterns are not accepted. - # - +open+: Match open, non-hidden indices. Also matches any non-hidden data stream. Server default: open. - # @option arguments [Boolean] :ignore_throttled If +true+, concrete, expanded or aliased indices are ignored when frozen. Server default: true. - # @option arguments [Boolean] :ignore_unavailable If +true+, unavailable indices (missing or closed) are ignored. + # - `all`: Match any data stream or index, including hidden ones. + # - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed. + # - `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or both. + # - `none`: Wildcard patterns are not accepted. + # - `open`: Match open, non-hidden indices. Also matches any non-hidden data stream. Server default: open. + # @option arguments [Boolean] :ignore_throttled If `true`, concrete, expanded or aliased indices are ignored when frozen. Server default: true. + # @option arguments [Boolean] :ignore_unavailable If `true`, unavailable indices (missing or closed) are ignored. # @option arguments [Hash] :headers Custom HTTP headers # @option arguments [Hash] :body request body # diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/mget.rb b/elasticsearch-api/lib/elasticsearch/api/actions/mget.rb index ddba03593c..376301a9e1 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/mget.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/mget.rb @@ -26,29 +26,29 @@ module Actions # If you specify an index in the request URI, you only need to specify the document IDs in the request body. # To ensure fast responses, this multi get (mget) API responds with partial results if one or more shards fail. # **Filter source fields** - # By default, the +_source+ field is returned for every document (if stored). - # Use the +_source+ and +_source_include+ or +source_exclude+ attributes to filter what fields are returned for a particular document. - # You can include the +_source+, +_source_includes+, and +_source_excludes+ query parameters in the request URI to specify the defaults to use when there are no per-document instructions. + # By default, the `_source` field is returned for every document (if stored). + # Use the `_source` and `_source_include` or `source_exclude` attributes to filter what fields are returned for a particular document. + # You can include the `_source`, `_source_includes`, and `_source_excludes` query parameters in the request URI to specify the defaults to use when there are no per-document instructions. 
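A sketch of updating an existing datafeed's query in place, with the wildcard handling options shown above (the query and identifiers are placeholders):

```
client.machine_learning.update_datafeed(
  datafeed_id: 'datafeed-my-anomaly-job',
  body: { query: { term: { status: 'production' } } },
  allow_no_indices: true,
  expand_wildcards: 'open'
)
```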
# **Get stored fields** - # Use the +stored_fields+ attribute to specify the set of stored fields you want to retrieve. + # Use the `stored_fields` attribute to specify the set of stored fields you want to retrieve. # Any requested fields that are not stored are ignored. - # You can include the +stored_fields+ query parameter in the request URI to specify the defaults to use when there are no per-document instructions. + # You can include the `stored_fields` query parameter in the request URI to specify the defaults to use when there are no per-document instructions. # - # @option arguments [String] :index Name of the index to retrieve documents from when +ids+ are specified, or when a document in the +docs+ array does not specify an index. + # @option arguments [String] :index Name of the index to retrieve documents from when `ids` are specified, or when a document in the `docs` array does not specify an index. # @option arguments [Boolean] :force_synthetic_source Should this request force synthetic _source? # Use this to test if the mapping supports synthetic _source and to get a sense of the worst case performance. # Fetches with this enabled will be slower the enabling synthetic source natively in the index. # @option arguments [String] :preference Specifies the node or shard the operation should be performed on. Random by default. - # @option arguments [Boolean] :realtime If +true+, the request is real-time as opposed to near-real-time. Server default: true. - # @option arguments [Boolean] :refresh If +true+, the request refreshes relevant shards before retrieving documents. + # @option arguments [Boolean] :realtime If `true`, the request is real-time as opposed to near-real-time. Server default: true. + # @option arguments [Boolean] :refresh If `true`, the request refreshes relevant shards before retrieving documents. # @option arguments [String] :routing Custom value used to route operations to a specific shard. - # @option arguments [Boolean, String, Array] :_source True or false to return the +_source+ field or not, or a list of fields to return. + # @option arguments [Boolean, String, Array] :_source True or false to return the `_source` field or not, or a list of fields to return. # @option arguments [String, Array] :_source_excludes A comma-separated list of source fields to exclude from the response. - # You can also use this parameter to exclude fields from the subset specified in +_source_includes+ query parameter. + # You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. # @option arguments [String, Array] :_source_includes A comma-separated list of source fields to include in the response. - # If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the +_source_excludes+ query parameter. - # If the +_source+ parameter is +false+, this parameter is ignored. - # @option arguments [String, Array] :stored_fields If +true+, retrieves the document fields stored in the index rather than the document +_source+. Server default: false. + # If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. + # If the `_source` parameter is `false`, this parameter is ignored. + # @option arguments [String, Array] :stored_fields If `true`, retrieves the document fields stored in the index rather than the document `_source`. Server default: false. 
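A minimal mget sketch showing the source filtering described above (index, IDs, and field names are placeholders):

```
client.mget(
  index: 'my-index',
  body: { ids: ['1', '2'] },
  _source_includes: 'title,price',
  _source_excludes: 'internal.*'
)
```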
# @option arguments [Hash] :headers Custom HTTP headers # @option arguments [Hash] :body request body # diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/msearch.rb b/elasticsearch-api/lib/elasticsearch/api/actions/msearch.rb index 8a9ff5ce7e..3ef5a922f7 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/msearch.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/msearch.rb @@ -24,16 +24,22 @@ module Actions # Run multiple searches. # The format of the request is similar to the bulk API format and makes use of the newline delimited JSON (NDJSON) format. # The structure is as follows: - # + - # header\n - # body\n - # header\n - # body\n - # + + # + # ``` + # header + # + # body + # + # header + # + # body + # + # ``` + # # This structure is specifically optimized to reduce parsing if a specific search ends up redirected to another node. - # IMPORTANT: The final line of data must end with a newline character +\n+. - # Each newline character may be preceded by a carriage return +\r+. - # When sending requests to this endpoint the +Content-Type+ header should be set to +application/x-ndjson+. + # IMPORTANT: The final line of data must end with a newline character `\n`. + # Each newline character may be preceded by a carriage return `\r`. + # When sending requests to this endpoint the `Content-Type` header should be set to `application/x-ndjson`. # # @option arguments [String, Array] :index Comma-separated list of data streams, indices, and index aliases to search. # @option arguments [Boolean] :allow_no_indices If false, the request returns an error if any wildcard expression, index alias, or _all value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar. @@ -48,7 +54,7 @@ module Actions # Typically, this adds a small overhead to a request. # However, using computationally expensive named queries on a large number of hits may add significant overhead. # @option arguments [Integer] :max_concurrent_searches Maximum number of concurrent searches the multi search API can execute. - # Defaults to +max(1, (# of data nodes * min(search thread pool size, 10)))+. + # Defaults to `max(1, (# of data nodes * min(search thread pool size, 10)))`. # @option arguments [Integer] :max_concurrent_shard_requests Maximum number of concurrent shard requests that each sub-search request executes per node. Server default: 5. # @option arguments [Integer] :pre_filter_shard_size Defines a threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on its rewrite method i.e., if date filters are mandatory to match but the shard bounds and the query are disjoint. # @option arguments [Boolean] :rest_total_hits_as_int If true, hits.total are returned as an integer in the response. Defaults to false, which returns an object. 
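In the Ruby client the NDJSON header/body pairs above are expressed as an array of alternating hashes; a small sketch with placeholder indices:

```
client.msearch(
  body: [
    { index: 'my-index' },
    { query: { match: { title: 'hello' } }, size: 5 },
    { index: 'my-other-index' },
    { query: { match_all: {} } }
  ]
)
```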
diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/msearch_template.rb b/elasticsearch-api/lib/elasticsearch/api/actions/msearch_template.rb index 04e0cc75fb..d23beb8125 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/msearch_template.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/msearch_template.rb @@ -23,26 +23,27 @@ module API module Actions # Run multiple templated searches. # Run multiple templated searches with a single request. - # If you are providing a text file or text input to +curl+, use the +--data-binary+ flag instead of +-d+ to preserve newlines. + # If you are providing a text file or text input to `curl`, use the `--data-binary` flag instead of `-d` to preserve newlines. # For example: - # + + # + # ``` # $ cat requests # { "index": "my-index" } # { "id": "my-search-template", "params": { "query_string": "hello world", "from": 0, "size": 10 }} # { "index": "my-other-index" } # { "id": "my-other-search-template", "params": { "query_type": "match_all" }} # $ curl -H "Content-Type: application/x-ndjson" -XGET localhost:9200/_msearch/template --data-binary "@requests"; echo - # + + # ``` # # @option arguments [String, Array] :index A comma-separated list of data streams, indices, and aliases to search. - # It supports wildcards (+*+). - # To search all data streams and indices, omit this parameter or use +*+. - # @option arguments [Boolean] :ccs_minimize_roundtrips If +true+, network round-trips are minimized for cross-cluster search requests. Server default: true. + # It supports wildcards (`*`). + # To search all data streams and indices, omit this parameter or use `*`. + # @option arguments [Boolean] :ccs_minimize_roundtrips If `true`, network round-trips are minimized for cross-cluster search requests. Server default: true. # @option arguments [Integer] :max_concurrent_searches The maximum number of concurrent searches the API can run. # @option arguments [String] :search_type The type of the search operation. - # @option arguments [Boolean] :rest_total_hits_as_int If +true+, the response returns +hits.total+ as an integer. - # If +false+, it returns +hits.total+ as an object. - # @option arguments [Boolean] :typed_keys If +true+, the response prefixes aggregation and suggester names with their respective types. + # @option arguments [Boolean] :rest_total_hits_as_int If `true`, the response returns `hits.total` as an integer. + # If `false`, it returns `hits.total` as an object. + # @option arguments [Boolean] :typed_keys If `true`, the response prefixes aggregation and suggester names with their respective types. # @option arguments [Hash] :headers Custom HTTP headers # @option arguments [Hash] :body search_templates # diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/mtermvectors.rb b/elasticsearch-api/lib/elasticsearch/api/actions/mtermvectors.rb index ecef716fb1..bf0792817d 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/mtermvectors.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/mtermvectors.rb @@ -25,26 +25,26 @@ module Actions # Get multiple term vectors with a single request. # You can specify existing documents by index and ID or provide artificial documents in the body of the request. # You can specify the index in the request body or request URI. - # The response contains a +docs+ array with all the fetched termvectors. + # The response contains a `docs` array with all the fetched termvectors. # Each element has the structure provided by the termvectors API. 
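The same request expressed through the Ruby client rather than `curl`; the template IDs and parameters mirror the example above.

```
client.msearch_template(
  body: [
    { index: 'my-index' },
    { id: 'my-search-template', params: { query_string: 'hello world', from: 0, size: 10 } },
    { index: 'my-other-index' },
    { id: 'my-other-search-template', params: { query_type: 'match_all' } }
  ],
  rest_total_hits_as_int: true
)
```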
# **Artificial documents** - # You can also use +mtermvectors+ to generate term vectors for artificial documents provided in the body of the request. - # The mapping used is determined by the specified +_index+. + # You can also use `mtermvectors` to generate term vectors for artificial documents provided in the body of the request. + # The mapping used is determined by the specified `_index`. # # @option arguments [String] :index The name of the index that contains the documents. # @option arguments [Array] :ids A comma-separated list of documents ids. You must define ids as parameter or set "ids" or "docs" in the request body # @option arguments [String, Array] :fields A comma-separated list or wildcard expressions of fields to include in the statistics. - # It is used as the default list unless a specific field list is provided in the +completion_fields+ or +fielddata_fields+ parameters. - # @option arguments [Boolean] :field_statistics If +true+, the response includes the document count, sum of document frequencies, and sum of total term frequencies. Server default: true. - # @option arguments [Boolean] :offsets If +true+, the response includes term offsets. Server default: true. - # @option arguments [Boolean] :payloads If +true+, the response includes term payloads. Server default: true. - # @option arguments [Boolean] :positions If +true+, the response includes term positions. Server default: true. + # It is used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters. + # @option arguments [Boolean] :field_statistics If `true`, the response includes the document count, sum of document frequencies, and sum of total term frequencies. Server default: true. + # @option arguments [Boolean] :offsets If `true`, the response includes term offsets. Server default: true. + # @option arguments [Boolean] :payloads If `true`, the response includes term payloads. Server default: true. + # @option arguments [Boolean] :positions If `true`, the response includes term positions. Server default: true. # @option arguments [String] :preference The node or shard the operation should be performed on. # It is random by default. # @option arguments [Boolean] :realtime If true, the request is real-time as opposed to near-real-time. Server default: true. # @option arguments [String] :routing A custom value used to route operations to a specific shard. # @option arguments [Boolean] :term_statistics If true, the response includes term frequency and document frequency. - # @option arguments [Integer] :version If +true+, returns the document version as part of a hit. + # @option arguments [Integer] :version If `true`, returns the document version as part of a hit. # @option arguments [String] :version_type The version type. # @option arguments [Hash] :headers Custom HTTP headers # @option arguments [Hash] :body request body diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/nodes/clear_repositories_metering_archive.rb b/elasticsearch-api/lib/elasticsearch/api/actions/nodes/clear_repositories_metering_archive.rb index bf24782511..fb27637636 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/nodes/clear_repositories_metering_archive.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/nodes/clear_repositories_metering_archive.rb @@ -30,7 +30,7 @@ module Actions # support SLA of official GA features. # # @option arguments [String, Array] :node_id Comma-separated list of node IDs or names used to limit returned information. 
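A sketch of fetching term vectors for several stored documents at once (index, IDs, and field are placeholders):

```
client.mtermvectors(
  index: 'my-index',
  body: { ids: [1, 2, 3] },
  fields: 'title',
  term_statistics: true
)
```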
(*Required*) - # @option arguments [Integer] :max_archive_version Specifies the maximum +archive_version+ to be cleared from the archive. (*Required*) + # @option arguments [Integer] :max_archive_version Specifies the maximum `archive_version` to be cleared from the archive. (*Required*) # @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-clear-repositories-metering-archive diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/nodes/stats.rb b/elasticsearch-api/lib/elasticsearch/api/actions/nodes/stats.rb index 35b99da1b6..0aed278b27 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/nodes/stats.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/nodes/stats.rb @@ -37,7 +37,7 @@ module Actions # @option arguments [String] :level Indicates whether statistics are aggregated at the cluster, index, or shard level. # @option arguments [Time] :timeout Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. Server default: 30s. # @option arguments [Array] :types A comma-separated list of document types for the indexing index metric. - # @option arguments [Boolean] :include_unloaded_segments If +true+, the response includes information from segments that are not loaded into memory. + # @option arguments [Boolean] :include_unloaded_segments If `true`, the response includes information from segments that are not loaded into memory. # @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-stats diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/nodes/usage.rb b/elasticsearch-api/lib/elasticsearch/api/actions/nodes/usage.rb index e28f8c4b22..aae2647fc4 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/nodes/usage.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/nodes/usage.rb @@ -24,9 +24,9 @@ module Nodes module Actions # Get feature usage information. # - # @option arguments [String, Array] :node_id A comma-separated list of node IDs or names to limit the returned information; use +_local+ to return information from the node you're connecting to, leave empty to get information from all nodes + # @option arguments [String, Array] :node_id A comma-separated list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes # @option arguments [String, Array] :metric Limits the information returned to the specific metrics. - # A comma-separated list of the following options: +_all+, +rest_actions+. + # A comma-separated list of the following options: `_all`, `rest_actions`. # @option arguments [Time] :timeout Period to wait for a response. # If no response is received before the timeout expires, the request fails and returns an error. Server default: 30s. # @option arguments [Hash] :headers Custom HTTP headers diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/open_point_in_time.rb b/elasticsearch-api/lib/elasticsearch/api/actions/open_point_in_time.rb index f77430ed07..84995e2e77 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/open_point_in_time.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/open_point_in_time.rb @@ -26,17 +26,17 @@ module Actions # which is called point in time. 
Elasticsearch pit (point in time) is a lightweight view into the # state of the data as it existed when initiated. In some cases, it’s preferred to perform multiple # search requests using the same point in time. For example, if refreshes happen between - # +search_after+ requests, then the results of those requests might not be consistent as changes happening + # `search_after` requests, then the results of those requests might not be consistent as changes happening # between searches are only visible to the more recent point in time. # A point in time must be opened explicitly before being used in search requests. - # A subsequent search request with the +pit+ parameter must not specify +index+, +routing+, or +preference+ values as these parameters are copied from the point in time. - # Just like regular searches, you can use +from+ and +size+ to page through point in time search results, up to the first 10,000 hits. - # If you want to retrieve more hits, use PIT with +search_after+. + # A subsequent search request with the `pit` parameter must not specify `index`, `routing`, or `preference` values as these parameters are copied from the point in time. + # Just like regular searches, you can use `from` and `size` to page through point in time search results, up to the first 10,000 hits. + # If you want to retrieve more hits, use PIT with `search_after`. # IMPORTANT: The open point in time request and each subsequent search request can return different identifiers; always use the most recently received ID for the next search request. - # When a PIT that contains shard failures is used in a search request, the missing are always reported in the search response as a +NoShardAvailableActionException+ exception. + # When a PIT that contains shard failures is used in a search request, the missing shards are always reported in the search response as a `NoShardAvailableActionException` exception. # To get rid of these exceptions, a new PIT needs to be created so that shards missing from the previous PIT can be handled, assuming they become available in the meantime. # **Keeping point in time alive** - # The +keep_alive+ parameter, which is passed to a open point in time request and search request, extends the time to live of the corresponding point in time. + # The `keep_alive` parameter, which is passed to an open point in time request and search request, extends the time to live of the corresponding point in time. # The value does not need to be long enough to process all data — it just needs to be long enough for the next request. # Normally, the background merge process optimizes the index by merging together smaller segments to create new, bigger segments. # Once the smaller segments are no longer needed they are deleted. @@ -48,18 +48,18 @@ module Actions # Note that a point-in-time doesn't prevent its associated indices from being deleted. # You can check how many point-in-times (that is, search contexts) are open with the nodes stats API. # - # @option arguments [String, Array] :index A comma-separated list of index names to open point in time; use +_all+ or empty string to perform the operation on all indices (*Required*) + # @option arguments [String, Array] :index A comma-separated list of index names to open point in time; use `_all` or empty string to perform the operation on all indices (*Required*) # @option arguments [Time] :keep_alive Extend the length of time that the point in time persists.
(*Required*) - # @option arguments [Boolean] :ignore_unavailable If +false+, the request returns an error if it targets a missing or closed index. + # @option arguments [Boolean] :ignore_unavailable If `false`, the request returns an error if it targets a missing or closed index. # @option arguments [String] :preference The node or shard the operation should be performed on. # By default, it is random. # @option arguments [String] :routing A custom value that is used to route operations to a specific shard. # @option arguments [String, Array] :expand_wildcards The type of index that wildcard patterns can match. # If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. - # It supports comma-separated values, such as +open,hidden+. Valid values are: +all+, +open+, +closed+, +hidden+, +none+. Server default: open. + # It supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. Server default: open. # @option arguments [Boolean] :allow_partial_search_results Indicates whether the point in time tolerates unavailable shards or shard failures when initially creating the PIT. - # If +false+, creating a point in time request when a shard is missing or unavailable will throw an exception. - # If +true+, the point in time will contain all the shards that are available at the time of the request. + # If `false`, creating a point in time request when a shard is missing or unavailable will throw an exception. + # If `true`, the point in time will contain all the shards that are available at the time of the request. # @option arguments [Integer] :max_concurrent_shard_requests Maximum number of concurrent shard requests that each sub-search request executes per node. Server default: 5. # @option arguments [Hash] :headers Custom HTTP headers # @option arguments [Hash] :body request body diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/put_script.rb b/elasticsearch-api/lib/elasticsearch/api/actions/put_script.rb index 8f992288d0..d461e1d46c 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/put_script.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/put_script.rb @@ -30,10 +30,10 @@ module Actions # To prevent errors, the API immediately compiles the script or template in this context. # @option arguments [Time] :master_timeout The period to wait for a connection to the master node. # If no response is received before the timeout expires, the request fails and returns an error. - # It can also be set to +-1+ to indicate that the request should never timeout. Server default: 30s. + # It can also be set to `-1` to indicate that the request should never timeout. Server default: 30s. # @option arguments [Time] :timeout The period to wait for a response. # If no response is received before the timeout expires, the request fails and returns an error. - # It can also be set to +-1+ to indicate that the request should never timeout. Server default: 30s. + # It can also be set to `-1` to indicate that the request should never timeout. Server default: 30s. 
# @option arguments [Hash] :headers Custom HTTP headers # @option arguments [Hash] :body request body # diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/query_rules/put_ruleset.rb b/elasticsearch-api/lib/elasticsearch/api/actions/query_rules/put_ruleset.rb index 3c6c94365d..0fbae6f2ec 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/query_rules/put_ruleset.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/query_rules/put_ruleset.rb @@ -24,8 +24,8 @@ module QueryRules module Actions # Create or update a query ruleset. # There is a limit of 100 rules per ruleset. - # This limit can be increased by using the +xpack.applications.rules.max_rules_per_ruleset+ cluster setting. - # IMPORTANT: Due to limitations within pinned queries, you can only select documents using +ids+ or +docs+, but cannot use both in single rule. + # This limit can be increased by using the `xpack.applications.rules.max_rules_per_ruleset` cluster setting. + # IMPORTANT: Due to limitations within pinned queries, you can only select documents using `ids` or `docs`, but cannot use both in a single rule. # It is advised to use one or the other in query rulesets, to avoid errors. # Additionally, pinned queries have a maximum limit of 100 pinned hits. # If multiple matching rules pin more than 100 documents, only the first 100 documents are pinned in the order they are specified in the ruleset. diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/rank_eval.rb b/elasticsearch-api/lib/elasticsearch/api/actions/rank_eval.rb index d7f4a5f210..a350c112e1 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/rank_eval.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/rank_eval.rb @@ -25,11 +25,11 @@ module Actions # Evaluate the quality of ranked search results over a set of typical search queries. # # @option arguments [String, Array] :index A comma-separated list of data streams, indices, and index aliases used to limit the request. - # Wildcard (+*+) expressions are supported. - # To target all data streams and indices in a cluster, omit this parameter or use +_all+ or +*+. - # @option arguments [Boolean] :allow_no_indices If +false+, the request returns an error if any wildcard expression, index alias, or +_all+ value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting +foo*,bar*+ returns an error if an index starts with +foo+ but no index starts with +bar+. Server default: true. + # Wildcard (`*`) expressions are supported. + # To target all data streams and indices in a cluster, omit this parameter or use `_all` or `*`. + # @option arguments [Boolean] :allow_no_indices If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. Server default: true. # @option arguments [String, Array] :expand_wildcards Whether to expand wildcard expression to concrete indices that are open, closed or both. - # @option arguments [Boolean] :ignore_unavailable If +true+, missing or closed indices are not included in the response. + # @option arguments [Boolean] :ignore_unavailable If `true`, missing or closed indices are not included in the response.
# @option arguments [String] :search_type Search operation type # @option arguments [Hash] :headers Custom HTTP headers # @option arguments [Hash] :body request body diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/reindex.rb b/elasticsearch-api/lib/elasticsearch/api/actions/reindex.rb index c79b5b9950..b223128844 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/reindex.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/reindex.rb @@ -27,42 +27,43 @@ module Actions # The source can be any existing index, alias, or data stream. # The destination must differ from the source. # For example, you cannot reindex a data stream into itself. - # IMPORTANT: Reindex requires +_source+ to be enabled for all documents in the source. + # IMPORTANT: Reindex requires `_source` to be enabled for all documents in the source. # The destination should be configured as wanted before calling the reindex API. # Reindex does not copy the settings from the source or its associated template. # Mappings, shard counts, and replicas, for example, must be configured ahead of time. # If the Elasticsearch security features are enabled, you must have the following security privileges: - # * The +read+ index privilege for the source data stream, index, or alias. - # * The +write+ index privilege for the destination data stream, index, or index alias. - # * To automatically create a data stream or index with a reindex API request, you must have the +auto_configure+, +create_index+, or +manage+ index privilege for the destination data stream, index, or alias. - # * If reindexing from a remote cluster, the +source.remote.user+ must have the +monitor+ cluster privilege and the +read+ index privilege for the source data stream, index, or alias. - # If reindexing from a remote cluster, you must explicitly allow the remote host in the +reindex.remote.whitelist+ setting. + # * The `read` index privilege for the source data stream, index, or alias. + # * The `write` index privilege for the destination data stream, index, or index alias. + # * To automatically create a data stream or index with a reindex API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege for the destination data stream, index, or alias. + # * If reindexing from a remote cluster, the `source.remote.user` must have the `monitor` cluster privilege and the `read` index privilege for the source data stream, index, or alias. + # If reindexing from a remote cluster, you must explicitly allow the remote host in the `reindex.remote.whitelist` setting. # Automatic data stream creation requires a matching index template with data stream enabled. - # The +dest+ element can be configured like the index API to control optimistic concurrency control. - # Omitting +version_type+ or setting it to +internal+ causes Elasticsearch to blindly dump documents into the destination, overwriting any that happen to have the same ID. - # Setting +version_type+ to +external+ causes Elasticsearch to preserve the +version+ from the source, create any documents that are missing, and update any documents that have an older version in the destination than they do in the source. - # Setting +op_type+ to +create+ causes the reindex API to create only missing documents in the destination. + # The `dest` element can be configured like the index API to control optimistic concurrency control. 
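As a rough illustration of configuring `dest` (the index names here are hypothetical), a reindex call through the Ruby client might look like the sketch below; the effect of the individual `version_type` and `op_type` values is described next.

```ruby
require 'elasticsearch'

client = Elasticsearch::Client.new(url: 'http://localhost:9200') # assumed cluster URL

# Sketch only: preserve source document versions in the destination.
client.reindex(
  body: {
    source: { index: 'my-index' },
    dest:   { index: 'my-new-index', version_type: 'external' }
  }
)
```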
+ # Omitting `version_type` or setting it to `internal` causes Elasticsearch to blindly dump documents into the destination, overwriting any that happen to have the same ID. + # Setting `version_type` to `external` causes Elasticsearch to preserve the `version` from the source, create any documents that are missing, and update any documents that have an older version in the destination than they do in the source. + # Setting `op_type` to `create` causes the reindex API to create only missing documents in the destination. # All existing documents will cause a version conflict. - # IMPORTANT: Because data streams are append-only, any reindex request to a destination data stream must have an +op_type+ of +create+. + # IMPORTANT: Because data streams are append-only, any reindex request to a destination data stream must have an `op_type` of `create`. # A reindex can only add new documents to a destination data stream. # It cannot update existing documents in a destination data stream. # By default, version conflicts abort the reindex process. - # To continue reindexing if there are conflicts, set the +conflicts+ request body property to +proceed+. + # To continue reindexing if there are conflicts, set the `conflicts` request body property to `proceed`. # In this case, the response includes a count of the version conflicts that were encountered. - # Note that the handling of other error types is unaffected by the +conflicts+ property. - # Additionally, if you opt to count version conflicts, the operation could attempt to reindex more documents from the source than +max_docs+ until it has successfully indexed +max_docs+ documents into the target or it has gone through every document in the source query. + # Note that the handling of other error types is unaffected by the `conflicts` property. + # Additionally, if you opt to count version conflicts, the operation could attempt to reindex more documents from the source than `max_docs` until it has successfully indexed `max_docs` documents into the target or it has gone through every document in the source query. # NOTE: The reindex API makes no effort to handle ID collisions. # The last document written will "win" but the order isn't usually predictable so it is not a good idea to rely on this behavior. # Instead, make sure that IDs are unique by using a script. # **Running reindex asynchronously** - # If the request contains +wait_for_completion=false+, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to cancel or get the status of the task. - # Elasticsearch creates a record of this task as a document at +_tasks/+. + # If the request contains `wait_for_completion=false`, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to cancel or get the status of the task. + # Elasticsearch creates a record of this task as a document at `_tasks/`. # **Reindex from multiple sources** # If you have many sources to reindex it is generally better to reindex them one at a time rather than using a glob pattern to pick up multiple sources. # That way you can resume the process if there are any errors by removing the partially completed source and starting over. # It also makes parallelizing the process fairly simple: split the list of sources to reindex and run each list in parallel. 
# For example, you can use a bash script like this: - # + + # + # ``` # for index in i1 i2 i3 i4 i5; do # curl -HContent-Type:application/json -XPOST localhost:9200/_reindex?pretty -d'{ # "source": { @@ -73,18 +74,21 @@ module Actions # } # }' # done - # + + # ``` + # # **Throttling** - # Set +requests_per_second+ to any positive decimal number (+1.4+, +6+, +1000+, for example) to throttle the rate at which reindex issues batches of index operations. + # Set `requests_per_second` to any positive decimal number (`1.4`, `6`, `1000`, for example) to throttle the rate at which reindex issues batches of index operations. # Requests are throttled by padding each batch with a wait time. - # To turn off throttling, set +requests_per_second+ to +-1+. + # To turn off throttling, set `requests_per_second` to `-1`. # The throttling is done by waiting between batches so that the scroll that reindex uses internally can be given a timeout that takes into account the padding. - # The padding time is the difference between the batch size divided by the +requests_per_second+ and the time spent writing. - # By default the batch size is +1000+, so if +requests_per_second+ is set to +500+: - # + + # The padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing. + # By default the batch size is `1000`, so if `requests_per_second` is set to `500`: + # + # ``` # target_time = 1000 / 500 per second = 2 seconds # wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds - # + + # ``` + # # Since the batch is issued as a single bulk request, large batch sizes cause Elasticsearch to create many requests and then wait for a while before starting the next set. # This is "bursty" instead of "smooth". # **Slicing** @@ -92,86 +96,88 @@ module Actions # This parallelization can improve efficiency and provide a convenient way to break the request down into smaller parts. # NOTE: Reindexing from remote clusters does not support manual or automatic slicing. # You can slice a reindex request manually by providing a slice ID and total number of slices to each request. - # You can also let reindex automatically parallelize by using sliced scroll to slice on +_id+. - # The +slices+ parameter specifies the number of slices to use. - # Adding +slices+ to the reindex request just automates the manual process, creating sub-requests which means it has some quirks: + # You can also let reindex automatically parallelize by using sliced scroll to slice on `_id`. + # The `slices` parameter specifies the number of slices to use. + # Adding `slices` to the reindex request just automates the manual process, creating sub-requests which means it has some quirks: # * You can see these requests in the tasks API. These sub-requests are "child" tasks of the task for the request with slices. - # * Fetching the status of the task for the request with +slices+ only contains the status of completed slices. + # * Fetching the status of the task for the request with `slices` only contains the status of completed slices. # * These sub-requests are individually addressable for things like cancellation and rethrottling. - # * Rethrottling the request with +slices+ will rethrottle the unfinished sub-request proportionally. - # * Canceling the request with +slices+ will cancel each sub-request. - # * Due to the nature of +slices+, each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. 
Expect larger slices to have a more even distribution. - # * Parameters like +requests_per_second+ and +max_docs+ on a request with +slices+ are distributed proportionally to each sub-request. Combine that with the previous point about distribution being uneven and you should conclude that using +max_docs+ with +slices+ might not result in exactly +max_docs+ documents being reindexed. + # * Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally. + # * Canceling the request with `slices` will cancel each sub-request. + # * Due to the nature of `slices`, each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution. + # * Parameters like `requests_per_second` and `max_docs` on a request with `slices` are distributed proportionally to each sub-request. Combine that with the previous point about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being reindexed. # * Each sub-request gets a slightly different snapshot of the source, though these are all taken at approximately the same time. - # If slicing automatically, setting +slices+ to +auto+ will choose a reasonable number for most indices. + # If slicing automatically, setting `slices` to `auto` will choose a reasonable number for most indices. # If slicing manually or otherwise tuning automatic slicing, use the following guidelines. # Query performance is most efficient when the number of slices is equal to the number of shards in the index. - # If that number is large (for example, +500+), choose a lower number as too many slices will hurt performance. + # If that number is large (for example, `500`), choose a lower number as too many slices will hurt performance. # Setting slices higher than the number of shards generally does not improve efficiency and adds overhead. # Indexing performance scales linearly across available resources with the number of slices. # Whether query or indexing performance dominates the runtime depends on the documents being reindexed and cluster resources. # **Modify documents during reindexing** - # Like +_update_by_query+, reindex operations support a script that modifies the document. - # Unlike +_update_by_query+, the script is allowed to modify the document's metadata. - # Just as in +_update_by_query+, you can set +ctx.op+ to change the operation that is run on the destination. - # For example, set +ctx.op+ to +noop+ if your script decides that the document doesn’t have to be indexed in the destination. This "no operation" will be reported in the +noop+ counter in the response body. - # Set +ctx.op+ to +delete+ if your script decides that the document must be deleted from the destination. - # The deletion will be reported in the +deleted+ counter in the response body. - # Setting +ctx.op+ to anything else will return an error, as will setting any other field in +ctx+. + # Like `_update_by_query`, reindex operations support a script that modifies the document. + # Unlike `_update_by_query`, the script is allowed to modify the document's metadata. + # Just as in `_update_by_query`, you can set `ctx.op` to change the operation that is run on the destination. + # For example, set `ctx.op` to `noop` if your script decides that the document doesn’t have to be indexed in the destination. 
This "no operation" will be reported in the `noop` counter in the response body. + # Set `ctx.op` to `delete` if your script decides that the document must be deleted from the destination. + # The deletion will be reported in the `deleted` counter in the response body. + # Setting `ctx.op` to anything else will return an error, as will setting any other field in `ctx`. # Think of the possibilities! Just be careful; you are able to change: - # * +_id+ - # * +_index+ - # * +_version+ - # * +_routing+ - # Setting +_version+ to +null+ or clearing it from the +ctx+ map is just like not sending the version in an indexing request. + # * `_id` + # * `_index` + # * `_version` + # * `_routing` + # Setting `_version` to `null` or clearing it from the `ctx` map is just like not sending the version in an indexing request. # It will cause the document to be overwritten in the destination regardless of the version on the target or the version type you use in the reindex API. # **Reindex from remote** # Reindex supports reindexing from a remote Elasticsearch cluster. - # The +host+ parameter must contain a scheme, host, port, and optional path. - # The +username+ and +password+ parameters are optional and when they are present the reindex operation will connect to the remote Elasticsearch node using basic authentication. + # The `host` parameter must contain a scheme, host, port, and optional path. + # The `username` and `password` parameters are optional and when they are present the reindex operation will connect to the remote Elasticsearch node using basic authentication. # Be sure to use HTTPS when using basic authentication or the password will be sent in plain text. # There are a range of settings available to configure the behavior of the HTTPS connection. # When using Elastic Cloud, it is also possible to authenticate against the remote cluster through the use of a valid API key. - # Remote hosts must be explicitly allowed with the +reindex.remote.whitelist+ setting. + # Remote hosts must be explicitly allowed with the `reindex.remote.whitelist` setting. # It can be set to a comma delimited list of allowed remote host and port combinations. # Scheme is ignored; only the host and port are used. # For example: - # + + # + # ``` # reindex.remote.whitelist: [otherhost:9200, another:9200, 127.0.10.*:9200, localhost:*"] - # + + # ``` + # # The list of allowed hosts must be configured on any nodes that will coordinate the reindex. # This feature should work with remote clusters of any version of Elasticsearch. # This should enable you to upgrade from any version of Elasticsearch to the current version by reindexing from a cluster of the old version. # WARNING: Elasticsearch does not support forward compatibility across major versions. # For example, you cannot reindex from a 7.x cluster into a 6.x cluster. - # To enable queries sent to older versions of Elasticsearch, the +query+ parameter is sent directly to the remote host without validation or modification. + # To enable queries sent to older versions of Elasticsearch, the `query` parameter is sent directly to the remote host without validation or modification. # NOTE: Reindexing from remote clusters does not support manual or automatic slicing. # Reindexing from a remote server uses an on-heap buffer that defaults to a maximum size of 100mb. # If the remote index includes very large documents you'll need to use a smaller batch size. 
- # It is also possible to set the socket read timeout on the remote connection with the +socket_timeout+ field and the connection timeout with the +connect_timeout+ field. + # It is also possible to set the socket read timeout on the remote connection with the `socket_timeout` field and the connection timeout with the `connect_timeout` field. # Both default to 30 seconds. # **Configuring SSL parameters** # Reindex from remote supports configurable SSL settings. - # These must be specified in the +elasticsearch.yml+ file, with the exception of the secure settings, which you add in the Elasticsearch keystore. + # These must be specified in the `elasticsearch.yml` file, with the exception of the secure settings, which you add in the Elasticsearch keystore. # It is not possible to configure SSL in the body of the reindex request. # - # @option arguments [Boolean] :refresh If +true+, the request refreshes affected shards to make this operation visible to search. + # @option arguments [Boolean] :refresh If `true`, the request refreshes affected shards to make this operation visible to search. # @option arguments [Float] :requests_per_second The throttle for this request in sub-requests per second. # By default, there is no throttle. Server default: -1. # @option arguments [Time] :scroll The period of time that a consistent view of the index should be maintained for scrolled search. # @option arguments [Integer, String] :slices The number of slices this task should be divided into. # It defaults to one slice, which means the task isn't sliced into subtasks.Reindex supports sliced scroll to parallelize the reindexing process. - # This parallelization can improve efficiency and provide a convenient way to break the request down into smaller parts.NOTE: Reindexing from remote clusters does not support manual or automatic slicing.If set to +auto+, Elasticsearch chooses the number of slices to use. + # This parallelization can improve efficiency and provide a convenient way to break the request down into smaller parts.NOTE: Reindexing from remote clusters does not support manual or automatic slicing.If set to `auto`, Elasticsearch chooses the number of slices to use. # This setting will use one slice per shard, up to a certain limit. # If there are multiple sources, it will choose the number of slices based on the index or backing index with the smallest number of shards. Server default: 1. # @option arguments [Time] :timeout The period each indexing waits for automatic index creation, dynamic mapping updates, and waiting for active shards. # By default, Elasticsearch waits for at least one minute before failing. # The actual wait time could be longer, particularly when multiple waits occur. Server default: 1m. # @option arguments [Integer, String] :wait_for_active_shards The number of shard copies that must be active before proceeding with the operation. - # Set it to +all+ or any positive integer up to the total number of shards in the index (+number_of_replicas+1+). + # Set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). # The default value is one, which means it waits for each primary shard to be active. Server default: 1. - # @option arguments [Boolean] :wait_for_completion If +true+, the request blocks until the operation is complete. Server default: true. - # @option arguments [Boolean] :require_alias If +true+, the destination must be an index alias. 
+ # @option arguments [Boolean] :wait_for_completion If `true`, the request blocks until the operation is complete. Server default: true. + # @option arguments [Boolean] :require_alias If `true`, the destination must be an index alias. # @option arguments [Hash] :headers Custom HTTP headers # @option arguments [Hash] :body request body # diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/reindex_rethrottle.rb b/elasticsearch-api/lib/elasticsearch/api/actions/reindex_rethrottle.rb index 251b0ddcfd..32d74f6bdb 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/reindex_rethrottle.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/reindex_rethrottle.rb @@ -24,16 +24,18 @@ module Actions # Throttle a reindex operation. # Change the number of requests per second for a particular reindex operation. # For example: - # + + # + # ``` # POST _reindex/r1A2WoRbTwKZ516z6NEs5A:36619/_rethrottle?requests_per_second=-1 - # + + # ``` + # # Rethrottling that speeds up the query takes effect immediately. # Rethrottling that slows down the query will take effect after completing the current batch. # This behavior prevents scroll timeouts. # # @option arguments [String] :task_id The task identifier, which can be found by using the tasks API. (*Required*) # @option arguments [Float] :requests_per_second The throttle for this request in sub-requests per second. - # It can be either +-1+ to turn off throttling or any decimal number like +1.7+ or +12+ to throttle to that level. + # It can be either `-1` to turn off throttling or any decimal number like `1.7` or `12` to throttle to that level. # @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-reindex diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/render_search_template.rb b/elasticsearch-api/lib/elasticsearch/api/actions/render_search_template.rb index 0d73feedac..79327dfb93 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/render_search_template.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/render_search_template.rb @@ -25,7 +25,7 @@ module Actions # Render a search template as a search request body. # # @option arguments [String] :id The ID of the search template to render. - # If no +source+ is specified, this or the +id+ request body parameter is required. + # If no `source` is specified, this or the `id` request body parameter is required. # @option arguments [Hash] :headers Custom HTTP headers # @option arguments [Hash] :body request body # diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/scroll.rb b/elasticsearch-api/lib/elasticsearch/api/actions/scroll.rb index 6aef1c7613..c663f4c816 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/scroll.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/scroll.rb @@ -22,11 +22,11 @@ module Elasticsearch module API module Actions # Run a scrolling search. - # IMPORTANT: The scroll API is no longer recommend for deep pagination. If you need to preserve the index state while paging through more than 10,000 hits, use the +search_after+ parameter with a point in time (PIT). + # IMPORTANT: The scroll API is no longer recommended for deep pagination. If you need to preserve the index state while paging through more than 10,000 hits, use the `search_after` parameter with a point in time (PIT). # The scroll API gets large sets of results from a single scrolling search request.
- # To get the necessary scroll ID, submit a search API request that includes an argument for the +scroll+ query parameter. - # The +scroll+ parameter indicates how long Elasticsearch should retain the search context for the request. - # The search response returns a scroll ID in the +_scroll_id+ response body parameter. + # To get the necessary scroll ID, submit a search API request that includes an argument for the `scroll` query parameter. + # The `scroll` parameter indicates how long Elasticsearch should retain the search context for the request. + # The search response returns a scroll ID in the `_scroll_id` response body parameter. # You can then use the scroll ID with the scroll API to retrieve the next batch of results for the request. # If the Elasticsearch security features are enabled, the access to the results of a specific scroll ID is restricted to the user or API key that submitted the search. # You can also use the scroll API to specify a new scroll parameter that extends or shortens the retention period for the search context. diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/search.rb b/elasticsearch-api/lib/elasticsearch/api/actions/search.rb index 0d8d3bdd16..a31292acb0 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/search.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/search.rb @@ -23,12 +23,12 @@ module API module Actions # Run a search. # Get search hits that match the query defined in the request. - # You can provide search queries using the +q+ query string parameter or the request body. + # You can provide search queries using the `q` query string parameter or the request body. # If both are specified, only the query parameter is used. # If the Elasticsearch security features are enabled, you must have the read index privilege for the target data stream, index, or alias. For cross-cluster search, refer to the documentation about configuring CCS privileges. - # To search a point in time (PIT) for an alias, you must have the +read+ index privilege for the alias's data streams or indices. + # To search a point in time (PIT) for an alias, you must have the `read` index privilege for the alias's data streams or indices. # **Search slicing** - # When paging through a large number of documents, it can be helpful to split the search into multiple slices to consume them independently with the +slice+ and +pit+ properties. + # When paging through a large number of documents, it can be helpful to split the search into multiple slices to consume them independently with the `slice` and `pit` properties. # By default the splitting is done first on the shards, then locally on each shard. # The local splitting partitions the shard into contiguous ranges based on Lucene document IDs. # For instance if the number of shards is equal to 2 and you request 4 slices, the slices 0 and 2 are assigned to the first shard and the slices 1 and 3 are assigned to the second shard. @@ -37,113 +37,113 @@ module Actions # This situation can occur because the splitting criterion is based on Lucene document IDs, which are not stable across changes to the index. # # @option arguments [String, Array] :index A comma-separated list of data streams, indices, and aliases to search. - # It supports wildcards (+*+). - # To search all data streams and indices, omit this parameter or use +*+ or +_all+. 
- # @option arguments [Boolean] :allow_no_indices If +false+, the request returns an error if any wildcard expression, index alias, or +_all+ value targets only missing or closed indices. + # It supports wildcards (`*`). + # To search all data streams and indices, omit this parameter or use `*` or `_all`. + # @option arguments [Boolean] :allow_no_indices If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. # This behavior applies even if the request targets other open indices. - # For example, a request targeting +foo*,bar*+ returns an error if an index starts with +foo+ but no index starts with +bar+. Server default: true. - # @option arguments [Boolean] :allow_partial_search_results If +true+ and there are shard request timeouts or shard failures, the request returns partial results. - # If +false+, it returns an error with no partial results.To override the default behavior, you can set the +search.default_allow_partial_results+ cluster setting to +false+. Server default: true. + # For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. Server default: true. + # @option arguments [Boolean] :allow_partial_search_results If `true` and there are shard request timeouts or shard failures, the request returns partial results. + # If `false`, it returns an error with no partial results.To override the default behavior, you can set the `search.default_allow_partial_results` cluster setting to `false`. Server default: true. # @option arguments [String] :analyzer The analyzer to use for the query string. - # This parameter can be used only when the +q+ query string parameter is specified. - # @option arguments [Boolean] :analyze_wildcard If +true+, wildcard and prefix queries are analyzed. - # This parameter can be used only when the +q+ query string parameter is specified. + # This parameter can be used only when the `q` query string parameter is specified. + # @option arguments [Boolean] :analyze_wildcard If `true`, wildcard and prefix queries are analyzed. + # This parameter can be used only when the `q` query string parameter is specified. # @option arguments [Integer] :batched_reduce_size The number of shard results that should be reduced at once on the coordinating node. # If the potential number of shards in the request can be large, this value should be used as a protection mechanism to reduce the memory overhead per search request. Server default: 512. - # @option arguments [Boolean] :ccs_minimize_roundtrips If +true+, network round-trips between the coordinating node and the remote clusters are minimized when running cross-cluster search (CCS) requests. Server default: true. - # @option arguments [String] :default_operator The default operator for the query string query: +AND+ or +OR+. - # This parameter can be used only when the +q+ query string parameter is specified. Server default: OR. + # @option arguments [Boolean] :ccs_minimize_roundtrips If `true`, network round-trips between the coordinating node and the remote clusters are minimized when running cross-cluster search (CCS) requests. Server default: true. + # @option arguments [String] :default_operator The default operator for the query string query: `AND` or `OR`. + # This parameter can be used only when the `q` query string parameter is specified. Server default: OR. 
# @option arguments [String] :df The field to use as a default when no field prefix is given in the query string. - # This parameter can be used only when the +q+ query string parameter is specified. + # This parameter can be used only when the `q` query string parameter is specified. # @option arguments [String, Array] :docvalue_fields A comma-separated list of fields to return as the docvalue representation of a field for each hit. # @option arguments [String, Array] :expand_wildcards The type of index that wildcard patterns can match. # If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. - # It supports comma-separated values such as +open,hidden+. Server default: open. - # @option arguments [Boolean] :explain If +true+, the request returns detailed information about score computation as part of a hit. - # @option arguments [Boolean] :ignore_throttled If +true+, concrete, expanded or aliased indices will be ignored when frozen. Server default: true. - # @option arguments [Boolean] :ignore_unavailable If +false+, the request returns an error if it targets a missing or closed index. - # @option arguments [Boolean] :include_named_queries_score If +true+, the response includes the score contribution from any named queries.This functionality reruns each named query on every hit in a search response. + # It supports comma-separated values such as `open,hidden`. Server default: open. + # @option arguments [Boolean] :explain If `true`, the request returns detailed information about score computation as part of a hit. + # @option arguments [Boolean] :ignore_throttled If `true`, concrete, expanded or aliased indices will be ignored when frozen. Server default: true. + # @option arguments [Boolean] :ignore_unavailable If `false`, the request returns an error if it targets a missing or closed index. + # @option arguments [Boolean] :include_named_queries_score If `true`, the response includes the score contribution from any named queries.This functionality reruns each named query on every hit in a search response. # Typically, this adds a small overhead to a request. # However, using computationally expensive named queries on a large number of hits may add significant overhead. - # @option arguments [Boolean] :lenient If +true+, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. - # This parameter can be used only when the +q+ query string parameter is specified. + # @option arguments [Boolean] :lenient If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. + # This parameter can be used only when the `q` query string parameter is specified. # @option arguments [Integer] :max_concurrent_shard_requests The number of concurrent shard requests per node that the search runs concurrently. # This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests. Server default: 5. # @option arguments [String] :preference The nodes and shards used for the search. # By default, Elasticsearch selects from eligible nodes and shards using adaptive replica selection, accounting for allocation awareness. # Valid values are: - # - +_only_local+ to run the search only on shards on the local node. - # - +_local+ to, if possible, run the search on shards on the local node, or if not, select shards using the default method. 
- # - +_only_nodes:,+ to run the search on only the specified nodes IDs. If suitable shards exist on more than one selected node, use shards on those nodes using the default method. If none of the specified nodes are available, select shards from any available node using the default method. - # - +_prefer_nodes:,+ to if possible, run the search on the specified nodes IDs. If not, select shards using the default method. - # - +_shards:,+ to run the search only on the specified shards. You can combine this value with other +preference+ values. However, the +_shards+ value must come first. For example: +_shards:2,3|_local+. - # - ++ (any string that does not start with +_+) to route searches with the same ++ to the same shards in the same order. + # - `_only_local` to run the search only on shards on the local node. + # - `_local` to, if possible, run the search on shards on the local node, or if not, select shards using the default method. + # - `_only_nodes:,` to run the search on only the specified nodes IDs. If suitable shards exist on more than one selected node, use shards on those nodes using the default method. If none of the specified nodes are available, select shards from any available node using the default method. + # - `_prefer_nodes:,` to if possible, run the search on the specified nodes IDs. If not, select shards using the default method. + # - `_shards:,` to run the search only on the specified shards. You can combine this value with other `preference` values. However, the `_shards` value must come first. For example: `_shards:2,3|_local`. + # - `` (any string that does not start with `_`) to route searches with the same `` to the same shards in the same order. # @option arguments [Integer] :pre_filter_shard_size A threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. # This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on its rewrite method (if date filters are mandatory to match but the shard bounds and the query are disjoint). # When unspecified, the pre-filter phase is executed if any of these conditions is met: # - The request targets more than 128 shards. # - The request targets one or more read-only index. # - The primary sort of the query targets an indexed field. - # @option arguments [Boolean] :request_cache If +true+, the caching of search results is enabled for requests where +size+ is +0+. + # @option arguments [Boolean] :request_cache If `true`, the caching of search results is enabled for requests where `size` is `0`. # It defaults to index level settings. # @option arguments [String] :routing A custom value that is used to route operations to a specific shard. # @option arguments [Time] :scroll The period to retain the search context for scrolling. - # By default, this value cannot exceed +1d+ (24 hours). - # You can change this limit by using the +search.max_keep_alive+ cluster-level setting. + # By default, this value cannot exceed `1d` (24 hours). + # You can change this limit by using the `search.max_keep_alive` cluster-level setting. # @option arguments [String] :search_type Indicates how distributed term frequencies are calculated for relevance scoring. - # @option arguments [Array] :stats Specific +tag+ of the request for logging and statistical purposes. + # @option arguments [Array] :stats Specific `tag` of the request for logging and statistical purposes. 
# @option arguments [String, Array] :stored_fields A comma-separated list of stored fields to return as part of a hit. # If no fields are specified, no stored fields are included in the response. - # If this field is specified, the +_source+ parameter defaults to +false+. - # You can pass +_source: true+ to return both source fields and stored fields in the search response. + # If this field is specified, the `_source` parameter defaults to `false`. + # You can pass `_source: true` to return both source fields and stored fields in the search response. # @option arguments [String] :suggest_field The field to use for suggestions. # @option arguments [String] :suggest_mode The suggest mode. - # This parameter can be used only when the +suggest_field+ and +suggest_text+ query string parameters are specified. Server default: missing. + # This parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified. Server default: missing. # @option arguments [Integer] :suggest_size The number of suggestions to return. - # This parameter can be used only when the +suggest_field+ and +suggest_text+ query string parameters are specified. + # This parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified. # @option arguments [String] :suggest_text The source text for which the suggestions should be returned. - # This parameter can be used only when the +suggest_field+ and +suggest_text+ query string parameters are specified. + # This parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified. # @option arguments [Integer] :terminate_after The maximum number of documents to collect for each shard. # If a query reaches this limit, Elasticsearch terminates the query early. # Elasticsearch collects documents before sorting.IMPORTANT: Use with caution. # Elasticsearch applies this parameter to each shard handling the request. # When possible, let Elasticsearch perform early termination automatically. # Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers. - # If set to +0+ (default), the query does not terminate early. Server default: 0. + # If set to `0` (default), the query does not terminate early. Server default: 0. # @option arguments [Time] :timeout The period of time to wait for a response from each shard. # If no response is received before the timeout expires, the request fails and returns an error. # It defaults to no timeout. # @option arguments [Boolean, Integer] :track_total_hits The number of hits matching the query to count accurately. - # If +true+, the exact number of hits is returned at the cost of some performance. - # If +false+, the response does not include the total number of hits matching the query. Server default: 10000. - # @option arguments [Boolean] :track_scores If +true+, the request calculates and returns document scores, even if the scores are not used for sorting. - # @option arguments [Boolean] :typed_keys If +true+, aggregation and suggester names are be prefixed by their respective types in the response. - # @option arguments [Boolean] :rest_total_hits_as_int Indicates whether +hits.total+ should be rendered as an integer or an object in the rest search response. - # @option arguments [Boolean] :version If +true+, the request returns the document version as part of a hit. + # If `true`, the exact number of hits is returned at the cost of some performance. 
+ # If `false`, the response does not include the total number of hits matching the query. Server default: 10000. + # @option arguments [Boolean] :track_scores If `true`, the request calculates and returns document scores, even if the scores are not used for sorting. + # @option arguments [Boolean] :typed_keys If `true`, aggregation and suggester names are prefixed by their respective types in the response. + # @option arguments [Boolean] :rest_total_hits_as_int Indicates whether `hits.total` should be rendered as an integer or an object in the rest search response. + # @option arguments [Boolean] :version If `true`, the request returns the document version as part of a hit. # @option arguments [Boolean, String, Array] :_source The source fields that are returned for matching documents. - # These fields are returned in the +hits._source+ property of the search response. + # These fields are returned in the `hits._source` property of the search response. # Valid values are: - # - +true+ to return the entire document source. - # - +false+ to not return the document source. - # - ++ to return the source fields that are specified as a comma-separated list that supports wildcard (+*+) patterns. Server default: true. + # - `true` to return the entire document source. + # - `false` to not return the document source. + # - `` to return the source fields that are specified as a comma-separated list that supports wildcard (`*`) patterns. Server default: true. # @option arguments [String, Array] :_source_excludes A comma-separated list of source fields to exclude from the response. - # You can also use this parameter to exclude fields from the subset specified in +_source_includes+ query parameter. - # If the +_source+ parameter is +false+, this parameter is ignored. + # You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. + # If the `_source` parameter is `false`, this parameter is ignored. # @option arguments [String, Array] :_source_includes A comma-separated list of source fields to include in the response. # If this parameter is specified, only these source fields are returned. - # You can exclude fields from this subset using the +_source_excludes+ query parameter. - # If the +_source+ parameter is +false+, this parameter is ignored. - # @option arguments [Boolean] :seq_no_primary_term If +true+, the request returns the sequence number and primary term of the last modification of each hit. + # You can exclude fields from this subset using the `_source_excludes` query parameter. + # If the `_source` parameter is `false`, this parameter is ignored. + # @option arguments [Boolean] :seq_no_primary_term If `true`, the request returns the sequence number and primary term of the last modification of each hit. # @option arguments [String] :q A query in the Lucene query string syntax. # Query parameter searches do not support the full Elasticsearch Query DSL but are handy for testing.IMPORTANT: This parameter overrides the query parameter in the request body. # If both parameters are specified, documents matching the query request body parameter are not returned. # @option arguments [Integer] :size The number of hits to return. - # By default, you cannot page through more than 10,000 hits using the +from+ and +size+ parameters. - # To page through more hits, use the +search_after+ parameter. Server default: 10. + # By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters.
+ # To page through more hits, use the `search_after` parameter. Server default: 10. # @option arguments [Integer] :from The starting document offset, which must be non-negative. - # By default, you cannot page through more than 10,000 hits using the +from+ and +size+ parameters. - # To page through more hits, use the +search_after+ parameter. Server default: 0. - # @option arguments [String] :sort A comma-separated list of +:+ pairs. + # By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. + # To page through more hits, use the `search_after` parameter. Server default: 0. + # @option arguments [String] :sort A comma-separated list of `:` pairs. # @option arguments [Boolean] :force_synthetic_source Should this request force synthetic _source? # Use this to test if the mapping supports synthetic _source and to get a sense of the worst case performance. # Fetches with this enabled will be slower the enabling synthetic source natively in the index. diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/search_application/put.rb b/elasticsearch-api/lib/elasticsearch/api/actions/search_application/put.rb index aa31b7b375..ed98c5af1e 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/search_application/put.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/search_application/put.rb @@ -29,7 +29,7 @@ module Actions # SLA of official GA features. # # @option arguments [String] :name The name of the search application to be created or updated. (*Required*) - # @option arguments [Boolean] :create If +true+, this request cannot replace or update existing Search Applications. + # @option arguments [Boolean] :create If `true`, this request cannot replace or update existing Search Applications. # @option arguments [Hash] :headers Custom HTTP headers # @option arguments [Hash] :body search_application # diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/search_application/render_query.rb b/elasticsearch-api/lib/elasticsearch/api/actions/search_application/render_query.rb index a2183ec60f..084b0e3deb 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/search_application/render_query.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/search_application/render_query.rb @@ -24,9 +24,9 @@ module SearchApplication module Actions # Render a search application query. # Generate an Elasticsearch query using the specified query parameters and the search template associated with the search application or a default template if none is specified. - # If a parameter used in the search template is not specified in +params+, the parameter's default value will be used. + # If a parameter used in the search template is not specified in `params`, the parameter's default value will be used. # The API returns the specific Elasticsearch query that would be generated and run by calling the search application search API. - # You must have +read+ privileges on the backing alias of the search application. + # You must have `read` privileges on the backing alias of the search application. # This functionality is Experimental and may be changed or removed # completely in a future release. 
Elastic will take a best effort approach # to fix any issues, but experimental features are not subject to the diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/search_mvt.rb b/elasticsearch-api/lib/elasticsearch/api/actions/search_mvt.rb index 8a5850b60d..0cc5d3dfa8 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/search_mvt.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/search_mvt.rb @@ -26,12 +26,13 @@ module Actions # Before using this API, you should be familiar with the Mapbox vector tile specification. # The API returns results as a binary mapbox vector tile. # Internally, Elasticsearch translates a vector tile search API request into a search containing: - # * A +geo_bounding_box+ query on the ++. The query uses the +//+ tile as a bounding box. - # * A +geotile_grid+ or +geohex_grid+ aggregation on the ++. The +grid_agg+ parameter determines the aggregation type. The aggregation uses the +//+ tile as a bounding box. - # * Optionally, a +geo_bounds+ aggregation on the ++. The search only includes this aggregation if the +exact_bounds+ parameter is +true+. - # * If the optional parameter +with_labels+ is +true+, the internal search will include a dynamic runtime field that calls the +getLabelPosition+ function of the geometry doc value. This enables the generation of new point features containing suggested geometry labels, so that, for example, multi-polygons will have only one label. - # For example, Elasticsearch may translate a vector tile search API request with a +grid_agg+ argument of +geotile+ and an +exact_bounds+ argument of +true+ into the following search - # + + # * A `geo_bounding_box` query on the ``. The query uses the `//` tile as a bounding box. + # * A `geotile_grid` or `geohex_grid` aggregation on the ``. The `grid_agg` parameter determines the aggregation type. The aggregation uses the `//` tile as a bounding box. + # * Optionally, a `geo_bounds` aggregation on the ``. The search only includes this aggregation if the `exact_bounds` parameter is `true`. + # * If the optional parameter `with_labels` is `true`, the internal search will include a dynamic runtime field that calls the `getLabelPosition` function of the geometry doc value. This enables the generation of new point features containing suggested geometry labels, so that, for example, multi-polygons will have only one label. + # For example, Elasticsearch may translate a vector tile search API request with a `grid_agg` argument of `geotile` and an `exact_bounds` argument of `true` into the following search + # + # ``` # GET my-index/_search # { # "size": 10000, @@ -75,14 +76,15 @@ module Actions # } # } # } - # + + # ``` + # # The API returns results as a binary Mapbox vector tile. # Mapbox vector tiles are encoded as Google Protobufs (PBF). By default, the tile contains three layers: - # * A +hits+ layer containing a feature for each ++ value matching the +geo_bounding_box+ query. - # * An +aggs+ layer containing a feature for each cell of the +geotile_grid+ or +geohex_grid+. The layer only contains features for cells with matching data. + # * A `hits` layer containing a feature for each `` value matching the `geo_bounding_box` query. + # * An `aggs` layer containing a feature for each cell of the `geotile_grid` or `geohex_grid`. The layer only contains features for cells with matching data. # * A meta layer containing: # * A feature containing a bounding box. By default, this is the bounding box of the tile. 
- # * Value ranges for any sub-aggregations on the +geotile_grid+ or +geohex_grid+. + # * Value ranges for any sub-aggregations on the `geotile_grid` or `geohex_grid`. # * Metadata for the search. # The API only returns features that can display at its zoom level. # For example, if a polygon feature has no area at its zoom level, the API omits it. @@ -90,20 +92,20 @@ module Actions # IMPORTANT: You can specify several options for this API as either a query parameter or request body parameter. # If you specify both parameters, the query parameter takes precedence. # **Grid precision for geotile** - # For a +grid_agg+ of +geotile+, you can use cells in the +aggs+ layer as tiles for lower zoom levels. - # +grid_precision+ represents the additional zoom levels available through these cells. The final precision is computed by as follows: + + grid_precision+. - # For example, if ++ is 7 and +grid_precision+ is 8, then the +geotile_grid+ aggregation will use a precision of 15. + # For a `grid_agg` of `geotile`, you can use cells in the `aggs` layer as tiles for lower zoom levels. + # `grid_precision` represents the additional zoom levels available through these cells. The final precision is computed as follows: ` + grid_precision`. + # For example, if `` is 7 and `grid_precision` is 8, then the `geotile_grid` aggregation will use a precision of 15. # The maximum final precision is 29. - # The +grid_precision+ also determines the number of cells for the grid as follows: +(2^grid_precision) x (2^grid_precision)+. + # The `grid_precision` also determines the number of cells for the grid as follows: `(2^grid_precision) x (2^grid_precision)`. # For example, a value of 8 divides the tile into a grid of 256 x 256 cells. - # The +aggs+ layer only contains features for cells with matching data. + # The `aggs` layer only contains features for cells with matching data. # **Grid precision for geohex** - # For a +grid_agg+ of +geohex+, Elasticsearch uses ++ and +grid_precision+ to calculate a final precision as follows: + + grid_precision+. - # This precision determines the H3 resolution of the hexagonal cells produced by the +geohex+ aggregation. + # For a `grid_agg` of `geohex`, Elasticsearch uses `` and `grid_precision` to calculate a final precision as follows: ` + grid_precision`. + # This precision determines the H3 resolution of the hexagonal cells produced by the `geohex` aggregation. # The following table maps the H3 resolution for each precision. - # For example, if ++ is 3 and +grid_precision+ is 3, the precision is 6. + # For example, if `` is 3 and `grid_precision` is 3, the precision is 6. # At a precision of 6, hexagonal cells have an H3 resolution of 2. - # If ++ is 3 and +grid_precision+ is 4, the precision is 7. + # If `` is 3 and `grid_precision` is 4, the precision is 7. # At a precision of 7, hexagonal cells have an H3 resolution of 3. # | Precision | Unique tile bins | H3 resolution | Unique hex bins | Ratio | # | --------- | ---------------- | ------------- | ----------------| ----- | @@ -146,13 +148,13 @@ module Actions # @option arguments [Integer] :zoom Zoom level for the vector tile to search (*Required*) # @option arguments [Integer] :x X coordinate for the vector tile to search (*Required*) # @option arguments [Integer] :y Y coordinate for the vector tile to search (*Required*) - # @option arguments [Boolean] :exact_bounds If +false+, the meta layer's feature is the bounding box of the tile.
+ # @option arguments [Boolean] :exact_bounds If `false`, the meta layer's feature is the bounding box of the tile. # If true, the meta layer's feature is a bounding box resulting from a # geo_bounds aggregation. The aggregation runs on values that intersect # the // tile with wrap_longitude set to false. The resulting # bounding box may be larger than the vector tile. # @option arguments [Integer] :extent The size, in pixels, of a side of the tile. Vector tiles are square with equal sides. Server default: 4096. - # @option arguments [String] :grid_agg Aggregation used to create a grid for +field+. + # @option arguments [String] :grid_agg Aggregation used to create a grid for `field`. # @option arguments [Integer] :grid_precision Additional zoom levels available through the aggs layer. For example, if is 7 # and grid_precision is 8, you can zoom in up to level 15. Accepts 0-8. If 0, results # don't include the aggs layer. Server default: 8. @@ -162,14 +164,14 @@ module Actions # of the cell. Server default: grid. # @option arguments [Integer] :size Maximum number of features to return in the hits layer. Accepts 0-10000. # If 0, results don't include the hits layer. Server default: 10000. - # @option arguments [Boolean] :with_labels If +true+, the hits and aggs layers will contain additional point features representing + # @option arguments [Boolean] :with_labels If `true`, the hits and aggs layers will contain additional point features representing # suggested label positions for the original features. - # - +Point+ and +MultiPoint+ features will have one of the points selected. - # - +Polygon+ and +MultiPolygon+ features will have a single point generated, either the centroid, if it is within the polygon, or another point within the polygon selected from the sorted triangle-tree. - # - +LineString+ features will likewise provide a roughly central point selected from the triangle-tree. + # - `Point` and `MultiPoint` features will have one of the points selected. + # - `Polygon` and `MultiPolygon` features will have a single point generated, either the centroid, if it is within the polygon, or another point within the polygon selected from the sorted triangle-tree. + # - `LineString` features will likewise provide a roughly central point selected from the triangle-tree. # - The aggregation results will provide one central point for each aggregation bucket. # All attributes from the original features will also be copied to the new label features. - # In addition, the new features will be distinguishable using the tag +_mvt_label_position+. + # In addition, the new features will be distinguishable using the tag `_mvt_label_position`. # @option arguments [Hash] :headers Custom HTTP headers # @option arguments [Hash] :body request body # diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/search_shards.rb b/elasticsearch-api/lib/elasticsearch/api/actions/search_shards.rb index 650d78a078..c33e3aa053 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/search_shards.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/search_shards.rb @@ -24,24 +24,24 @@ module Actions # Get the search shards. # Get the indices and shards that a search request would be run against. # This information can be useful for working out issues or planning optimizations with routing and shard preferences. - # When filtered aliases are used, the filter is returned as part of the +indices+ section. 
- # If the Elasticsearch security features are enabled, you must have the +view_index_metadata+ or +manage+ index privilege for the target data stream, index, or alias. + # When filtered aliases are used, the filter is returned as part of the `indices` section. + # If the Elasticsearch security features are enabled, you must have the `view_index_metadata` or `manage` index privilege for the target data stream, index, or alias. # # @option arguments [String, Array] :index A comma-separated list of data streams, indices, and aliases to search. - # It supports wildcards (+*+). - # To search all data streams and indices, omit this parameter or use +*+ or +_all+. - # @option arguments [Boolean] :allow_no_indices If +false+, the request returns an error if any wildcard expression, index alias, or +_all+ value targets only missing or closed indices. + # It supports wildcards (`*`). + # To search all data streams and indices, omit this parameter or use `*` or `_all`. + # @option arguments [Boolean] :allow_no_indices If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. # This behavior applies even if the request targets other open indices. - # For example, a request targeting +foo*,bar*+ returns an error if an index starts with +foo+ but no index starts with +bar+. + # For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. # @option arguments [String, Array] :expand_wildcards Type of index that wildcard patterns can match. # If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. - # Supports comma-separated values, such as +open,hidden+. - # Valid values are: +all+, +open+, +closed+, +hidden+, +none+. Server default: open. - # @option arguments [Boolean] :ignore_unavailable If +false+, the request returns an error if it targets a missing or closed index. - # @option arguments [Boolean] :local If +true+, the request retrieves information from the local node only. + # Supports comma-separated values, such as `open,hidden`. + # Valid values are: `all`, `open`, `closed`, `hidden`, `none`. Server default: open. + # @option arguments [Boolean] :ignore_unavailable If `false`, the request returns an error if it targets a missing or closed index. + # @option arguments [Boolean] :local If `true`, the request retrieves information from the local node only. # @option arguments [Time] :master_timeout The period to wait for a connection to the master node. # If the master node is not available before the timeout expires, the request fails and returns an error. - # IT can also be set to +-1+ to indicate that the request should never timeout. Server default: 30s. + # It can also be set to `-1` to indicate that the request should never timeout. Server default: 30s. # @option arguments [String] :preference The node or shard the operation should be performed on. # It is random by default. # @option arguments [String] :routing A custom value used to route operations to a specific shard. diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/search_template.rb b/elasticsearch-api/lib/elasticsearch/api/actions/search_template.rb index eedc6b23d8..41b74617a6 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/search_template.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/search_template.rb @@ -24,28 +24,28 @@ module Actions # Run a search with a search template.
# # @option arguments [String, Array] :index A comma-separated list of data streams, indices, and aliases to search. - # It supports wildcards (+*+). - # @option arguments [Boolean] :allow_no_indices If +false+, the request returns an error if any wildcard expression, index alias, or +_all+ value targets only missing or closed indices. + # It supports wildcards (`*`). + # @option arguments [Boolean] :allow_no_indices If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. # This behavior applies even if the request targets other open indices. - # For example, a request targeting +foo*,bar*+ returns an error if an index starts with +foo+ but no index starts with +bar+. Server default: true. - # @option arguments [Boolean] :ccs_minimize_roundtrips If +true+, network round-trips are minimized for cross-cluster search requests. + # For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. Server default: true. + # @option arguments [Boolean] :ccs_minimize_roundtrips If `true`, network round-trips are minimized for cross-cluster search requests. # @option arguments [String, Array] :expand_wildcards The type of index that wildcard patterns can match. # If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. - # Supports comma-separated values, such as +open,hidden+. - # Valid values are: +all+, +open+, +closed+, +hidden+, +none+. - # @option arguments [Boolean] :explain If +true+, the response includes additional details about score computation as part of a hit. - # @option arguments [Boolean] :ignore_throttled If +true+, specified concrete, expanded, or aliased indices are not included in the response when throttled. Server default: true. - # @option arguments [Boolean] :ignore_unavailable If +false+, the request returns an error if it targets a missing or closed index. + # Supports comma-separated values, such as `open,hidden`. + # Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + # @option arguments [Boolean] :explain If `true`, the response includes additional details about score computation as part of a hit. + # @option arguments [Boolean] :ignore_throttled If `true`, specified concrete, expanded, or aliased indices are not included in the response when throttled. Server default: true. + # @option arguments [Boolean] :ignore_unavailable If `false`, the request returns an error if it targets a missing or closed index. # @option arguments [String] :preference The node or shard the operation should be performed on. # It is random by default. - # @option arguments [Boolean] :profile If +true+, the query execution is profiled. + # @option arguments [Boolean] :profile If `true`, the query execution is profiled. # @option arguments [String] :routing A custom value used to route operations to a specific shard. # @option arguments [Time] :scroll Specifies how long a consistent view of the index # should be maintained for scrolled search. # @option arguments [String] :search_type The type of the search operation. - # @option arguments [Boolean] :rest_total_hits_as_int If +true+, +hits.total+ is rendered as an integer in the response. - # If +false+, it is rendered as an object. - # @option arguments [Boolean] :typed_keys If +true+, the response prefixes aggregation and suggester names with their respective types. 
+ # @option arguments [Boolean] :rest_total_hits_as_int If `true`, `hits.total` is rendered as an integer in the response. + # If `false`, it is rendered as an object. + # @option arguments [Boolean] :typed_keys If `true`, the response prefixes aggregation and suggester names with their respective types. # @option arguments [Hash] :headers Custom HTTP headers # @option arguments [Hash] :body request body # diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/searchable_snapshots/clear_cache.rb b/elasticsearch-api/lib/elasticsearch/api/actions/searchable_snapshots/clear_cache.rb index 9f6e8b6232..658d9f925b 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/searchable_snapshots/clear_cache.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/searchable_snapshots/clear_cache.rb @@ -30,9 +30,9 @@ module Actions # support SLA of official GA features. # # @option arguments [String, Array] :index A comma-separated list of data streams, indices, and aliases to clear from the cache. - # It supports wildcards (+*+). + # It supports wildcards (`*`). # @option arguments [String, Array] :expand_wildcards Whether to expand wildcard expression to concrete indices that are open, closed or both. - # @option arguments [Boolean] :allow_no_indices Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes +_all+ string or when no indices have been specified) + # @option arguments [Boolean] :allow_no_indices Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) # @option arguments [Boolean] :ignore_unavailable Whether specified concrete indices should be ignored when unavailable (missing or closed) # @option arguments [Hash] :headers Custom HTTP headers # diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/searchable_snapshots/mount.rb b/elasticsearch-api/lib/elasticsearch/api/actions/searchable_snapshots/mount.rb index 14cbc50f68..ee0e7b7d4f 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/searchable_snapshots/mount.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/searchable_snapshots/mount.rb @@ -31,7 +31,7 @@ module Actions # @option arguments [String] :snapshot The name of the snapshot of the index to mount. (*Required*) # @option arguments [Time] :master_timeout The period to wait for the master node. # If the master node is not available before the timeout expires, the request fails and returns an error. - # To indicate that the request should never timeout, set it to +-1+. Server default: 30s. + # To indicate that the request should never timeout, set it to `-1`. Server default: 30s. # @option arguments [Boolean] :wait_for_completion If true, the request blocks until the operation is complete. # @option arguments [String] :storage The mount option for the searchable snapshot index. Server default: full_copy. # @option arguments [Hash] :headers Custom HTTP headers diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/security/activate_user_profile.rb b/elasticsearch-api/lib/elasticsearch/api/actions/security/activate_user_profile.rb index 1820a7862e..783279d572 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/security/activate_user_profile.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/security/activate_user_profile.rb @@ -26,12 +26,12 @@ module Actions # Create or update a user profile on behalf of another user. 
# NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. # Individual users and external applications should not call this API directly. - # The calling application must have either an +access_token+ or a combination of +username+ and +password+ for the user that the profile document is intended for. + # The calling application must have either an `access_token` or a combination of `username` and `password` for the user that the profile document is intended for. # Elastic reserves the right to change or remove this feature in future releases without prior notice. - # This API creates or updates a profile document for end users with information that is extracted from the user's authentication object including +username+, +full_name,+ +roles+, and the authentication realm. - # For example, in the JWT +access_token+ case, the profile user's +username+ is extracted from the JWT token claim pointed to by the +claims.principal+ setting of the JWT realm that authenticated the token. + # This API creates or updates a profile document for end users with information that is extracted from the user's authentication object including `username`, `full_name`, `roles`, and the authentication realm. + # For example, in the JWT `access_token` case, the profile user's `username` is extracted from the JWT token claim pointed to by the `claims.principal` setting of the JWT realm that authenticated the token. # When updating a profile document, the API enables the document if it was disabled. - # Any updates do not change existing content for either the +labels+ or +data+ fields. + # Any updates do not change existing content for either the `labels` or `data` fields. # # @option arguments [Hash] :headers Custom HTTP headers # @option arguments [Hash] :body request body diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/security/bulk_delete_role.rb b/elasticsearch-api/lib/elasticsearch/api/actions/security/bulk_delete_role.rb index 16181fe490..4b23d8a9df 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/security/bulk_delete_role.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/security/bulk_delete_role.rb @@ -26,7 +26,7 @@ module Actions # The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. # The bulk delete roles API cannot delete roles that are defined in roles files. # - # @option arguments [String] :refresh If +true+ (the default) then refresh the affected shards to make this operation visible to search, if +wait_for+ then wait for a refresh to make this operation visible to search, if +false+ then do nothing with refreshes.
# @option arguments [Hash] :headers Custom HTTP headers # @option arguments [Hash] :body request body # diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/security/bulk_put_role.rb b/elasticsearch-api/lib/elasticsearch/api/actions/security/bulk_put_role.rb index 464bdfe127..07c844a077 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/security/bulk_put_role.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/security/bulk_put_role.rb @@ -26,7 +26,7 @@ module Actions # The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. # The bulk create or update roles API cannot update roles that are defined in roles files. # - # @option arguments [String] :refresh If +true+ (the default) then refresh the affected shards to make this operation visible to search, if +wait_for+ then wait for a refresh to make this operation visible to search, if +false+ then do nothing with refreshes. + # @option arguments [String] :refresh If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. # @option arguments [Hash] :headers Custom HTTP headers # @option arguments [Hash] :body request body # diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/security/bulk_update_api_keys.rb b/elasticsearch-api/lib/elasticsearch/api/actions/security/bulk_update_api_keys.rb index 5fd4b57238..1153796104 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/security/bulk_update_api_keys.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/security/bulk_update_api_keys.rb @@ -28,9 +28,9 @@ module Actions # This API is similar to the update API key API but enables you to apply the same update to multiple API keys in one API call. This operation can greatly improve performance over making individual updates. # It is not possible to update expired or invalidated API keys. # This API supports updates to API key access scope, metadata and expiration. - # The access scope of each API key is derived from the +role_descriptors+ you specify in the request and a snapshot of the owner user's permissions at the time of the request. + # The access scope of each API key is derived from the `role_descriptors` you specify in the request and a snapshot of the owner user's permissions at the time of the request. # The snapshot of the owner's permissions is updated automatically on every call. - # IMPORTANT: If you don't specify +role_descriptors+ in the request, a call to this API might still change an API key's access scope. This change can occur if the owner user's permissions have changed since the API key was created or last modified. + # IMPORTANT: If you don't specify `role_descriptors` in the request, a call to this API might still change an API key's access scope. This change can occur if the owner user's permissions have changed since the API key was created or last modified. # A successful request returns a JSON structure that contains the IDs of all updated API keys, the IDs of API keys that already had the requested changes and did not require an update, and error details for any failed update. 
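To make the bulk-update behavior described above concrete, here is a minimal, illustrative sketch using the Ruby client (not part of the patch). It assumes a configured `Elasticsearch::Client` instance named `client`; the key IDs, role descriptor, and metadata are hypothetical placeholders.

```ruby
require 'elasticsearch'

client = Elasticsearch::Client.new # connection and authentication assumed to be configured elsewhere

# Apply the same access-scope update to several API keys in one call.
response = client.security.bulk_update_api_keys(
  body: {
    ids: ['key-id-1', 'key-id-2'],       # hypothetical API key IDs
    role_descriptors: {                   # new assigned role descriptors for both keys
      'logs-reader' => {
        indices: [{ names: ['logs-*'], privileges: ['read'] }]
      }
    },
    metadata: { environment: 'staging' }  # replaces the keys' metadata
  }
)

# The response lists updated keys, keys that needed no change, and any per-key errors.
puts response.body
```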
# # @option arguments [Hash] :headers Custom HTTP headers diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/security/change_password.rb b/elasticsearch-api/lib/elasticsearch/api/actions/security/change_password.rb index 296ef4fd8a..5297340b03 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/security/change_password.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/security/change_password.rb @@ -27,7 +27,7 @@ module Actions # # @option arguments [String] :username The user whose password you want to change. If you do not specify this # parameter, the password is changed for the current user. - # @option arguments [String] :refresh If +true+ (the default) then refresh the affected shards to make this operation visible to search, if +wait_for+ then wait for a refresh to make this operation visible to search, if +false+ then do nothing with refreshes. + # @option arguments [String] :refresh If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. # @option arguments [Hash] :headers Custom HTTP headers # @option arguments [Hash] :body request body # diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/security/clear_api_key_cache.rb b/elasticsearch-api/lib/elasticsearch/api/actions/security/clear_api_key_cache.rb index e24496f632..858017fad4 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/security/clear_api_key_cache.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/security/clear_api_key_cache.rb @@ -27,7 +27,7 @@ module Actions # The cache is also automatically cleared on state changes of the security index. # # @option arguments [String, Array] :ids Comma-separated list of API key IDs to evict from the API key cache. - # To evict all API keys, use +*+. + # To evict all API keys, use `*`. # Does not support other wildcard patterns. (*Required*) # @option arguments [Hash] :headers Custom HTTP headers # diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/security/clear_cached_privileges.rb b/elasticsearch-api/lib/elasticsearch/api/actions/security/clear_cached_privileges.rb index 5007b253b7..2f98310633 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/security/clear_cached_privileges.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/security/clear_cached_privileges.rb @@ -27,7 +27,7 @@ module Actions # The cache is also automatically cleared for applications that have their privileges updated. # # @option arguments [String] :application A comma-separated list of applications. - # To clear all applications, use an asterism (+*+). + # To clear all applications, use an asterisk (`*`). # It does not support other wildcard patterns. (*Required*) # @option arguments [Hash] :headers Custom HTTP headers # diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/security/clear_cached_realms.rb b/elasticsearch-api/lib/elasticsearch/api/actions/security/clear_cached_realms.rb index b4df7c77b7..c34b678d13 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/security/clear_cached_realms.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/security/clear_cached_realms.rb @@ -30,7 +30,7 @@ module Actions # For more information, refer to the documentation about controlling the user cache. # # @option arguments [String, Array] :realms A comma-separated list of realms. - # To clear all realms, use an asterisk (+*+).
+ # To clear all realms, use an asterisk (`*`). # It does not support other wildcard patterns. (*Required*) # @option arguments [Array] :usernames A comma-separated list of the users to clear from the cache. # If you do not specify this parameter, the API evicts all users from the user cache. diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/security/clear_cached_roles.rb b/elasticsearch-api/lib/elasticsearch/api/actions/security/clear_cached_roles.rb index cc4443f1ad..4729588d79 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/security/clear_cached_roles.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/security/clear_cached_roles.rb @@ -26,7 +26,7 @@ module Actions # Evict roles from the native role cache. # # @option arguments [String, Array] :name A comma-separated list of roles to evict from the role cache. - # To evict all roles, use an asterisk (+*+). + # To evict all roles, use an asterisk (`*`). # It does not support other wildcard patterns. (*Required*) # @option arguments [Hash] :headers Custom HTTP headers # diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/security/clear_cached_service_tokens.rb b/elasticsearch-api/lib/elasticsearch/api/actions/security/clear_cached_service_tokens.rb index 9c75240a35..214a9a7c83 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/security/clear_cached_service_tokens.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/security/clear_cached_service_tokens.rb @@ -24,15 +24,15 @@ module Security module Actions # Clear service account token caches. # Evict a subset of all entries from the service account token caches. - # Two separate caches exist for service account tokens: one cache for tokens backed by the +service_tokens+ file, and another for tokens backed by the +.security+ index. + # Two separate caches exist for service account tokens: one cache for tokens backed by the `service_tokens` file, and another for tokens backed by the `.security` index. # This API clears matching entries from both caches. - # The cache for service account tokens backed by the +.security+ index is cleared automatically on state changes of the security index. - # The cache for tokens backed by the +service_tokens+ file is cleared automatically on file changes. + # The cache for service account tokens backed by the `.security` index is cleared automatically on state changes of the security index. + # The cache for tokens backed by the `service_tokens` file is cleared automatically on file changes. # # @option arguments [String] :namespace The namespace, which is a top-level grouping of service accounts. (*Required*) # @option arguments [String] :service The name of the service, which must be unique within its namespace. (*Required*) # @option arguments [String, Array] :name A comma-separated list of token names to evict from the service account token caches. - # Use a wildcard (+*+) to evict all tokens that belong to a service account. + # Use a wildcard (`*`) to evict all tokens that belong to a service account. # It does not support other wildcard patterns. 
(*Required*) # @option arguments [Hash] :headers Custom HTTP headers # diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/security/create_api_key.rb b/elasticsearch-api/lib/elasticsearch/api/actions/security/create_api_key.rb index 3810fba2bc..797484b01a 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/security/create_api_key.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/security/create_api_key.rb @@ -32,7 +32,7 @@ module Actions # The API keys are created by the Elasticsearch API key service, which is automatically enabled. # To configure or turn off the API key service, refer to API key service setting documentation. # - # @option arguments [String] :refresh If +true+ (the default) then refresh the affected shards to make this operation visible to search, if +wait_for+ then wait for a refresh to make this operation visible to search, if +false+ then do nothing with refreshes. + # @option arguments [String] :refresh If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. # @option arguments [Hash] :headers Custom HTTP headers # @option arguments [Hash] :body request body # diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/security/create_cross_cluster_api_key.rb b/elasticsearch-api/lib/elasticsearch/api/actions/security/create_cross_cluster_api_key.rb index 939ec2f287..dd0c050641 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/security/create_cross_cluster_api_key.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/security/create_cross_cluster_api_key.rb @@ -23,11 +23,11 @@ module API module Security module Actions # Create a cross-cluster API key. - # Create an API key of the +cross_cluster+ type for the API key based remote cluster access. - # A +cross_cluster+ API key cannot be used to authenticate through the REST interface. + # Create an API key of the `cross_cluster` type for the API key based remote cluster access. + # A `cross_cluster` API key cannot be used to authenticate through the REST interface. # IMPORTANT: To authenticate this request you must use a credential that is not an API key. Even if you use an API key that has the required privilege, the API returns an error. # Cross-cluster API keys are created by the Elasticsearch API key service, which is automatically enabled. - # NOTE: Unlike REST API keys, a cross-cluster API key does not capture permissions of the authenticated user. The API key’s effective permission is exactly as specified with the +access+ property. + # NOTE: Unlike REST API keys, a cross-cluster API key does not capture permissions of the authenticated user. The API key’s effective permission is exactly as specified with the `access` property. # A successful request returns a JSON structure that contains the API key, its unique ID, and its name. If applicable, it also returns expiration information for the API key in milliseconds. # By default, API keys never expire. You can specify expiration information when you create the API keys. # Cross-cluster API keys can only be updated with the update cross-cluster API key API. 
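As a usage sketch for the API key creation endpoints covered above (not part of the patch): it assumes a configured `Elasticsearch::Client` named `client`, and the key name, expiration, and role descriptor are illustrative placeholders.

```ruby
require 'elasticsearch'

client = Elasticsearch::Client.new # connection and authentication assumed to be configured elsewhere

# Create a REST API key and wait for a refresh so it is immediately visible to search.
response = client.security.create_api_key(
  refresh: 'wait_for',
  body: {
    name: 'ingest-key',       # illustrative key name
    expiration: '30d',        # omit for a key that never expires
    role_descriptors: {
      'writer' => { indices: [{ names: ['logs-*'], privileges: ['write'] }] }
    }
  }
)

# `encoded` is the base64 credential to send in an `Authorization: ApiKey ...` header.
puts response['encoded']
```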
diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/security/create_service_token.rb b/elasticsearch-api/lib/elasticsearch/api/actions/security/create_service_token.rb index 3315f2d18f..32c3242385 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/security/create_service_token.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/security/create_service_token.rb @@ -31,9 +31,9 @@ module Actions # @option arguments [String] :service The name of the service. (*Required*) # @option arguments [String] :name The name for the service account token. # If omitted, a random name will be generated.Token names must be at least one and no more than 256 characters. - # They can contain alphanumeric characters (a-z, A-Z, 0-9), dashes (+-+), and underscores (+_+), but cannot begin with an underscore.NOTE: Token names must be unique in the context of the associated service account. - # They must also be globally unique with their fully qualified names, which are comprised of the service account principal and token name, such as +//+. - # @option arguments [String] :refresh If +true+ then refresh the affected shards to make this operation visible to search, if +wait_for+ (the default) then wait for a refresh to make this operation visible to search, if +false+ then do nothing with refreshes. + # They can contain alphanumeric characters (a-z, A-Z, 0-9), dashes (`-`), and underscores (`_`), but cannot begin with an underscore. NOTE: Token names must be unique in the context of the associated service account. + # They must also be globally unique with their fully qualified names, which are comprised of the service account principal and token name, such as `//`. + # @option arguments [String] :refresh If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` (the default) then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. # @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-service-token diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/security/delegate_pki.rb b/elasticsearch-api/lib/elasticsearch/api/actions/security/delegate_pki.rb index e02386aaf6..8eecd8748a 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/security/delegate_pki.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/security/delegate_pki.rb @@ -24,8 +24,8 @@ module Security module Actions # Delegate PKI authentication. # This API implements the exchange of an X509Certificate chain for an Elasticsearch access token. - # The certificate chain is validated, according to RFC 5280, by sequentially considering the trust configuration of every installed PKI realm that has +delegation.enabled+ set to +true+. - # A successfully trusted client certificate is also subject to the validation of the subject distinguished name according to thw +username_pattern+ of the respective realm. + # The certificate chain is validated, according to RFC 5280, by sequentially considering the trust configuration of every installed PKI realm that has `delegation.enabled` set to `true`. + # A successfully trusted client certificate is also subject to the validation of the subject distinguished name according to the `username_pattern` of the respective realm.
# This API is called by smart and trusted proxies, such as Kibana, which terminate the user's TLS session but still want to authenticate the user by using a PKI realm, as if the user connected directly to Elasticsearch. # IMPORTANT: The association between the subject public key in the target certificate and the corresponding private key is not validated. # This is part of the TLS authentication process and it is delegated to the proxy that calls this API. diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/security/delete_privileges.rb b/elasticsearch-api/lib/elasticsearch/api/actions/security/delete_privileges.rb index 494c5608e8..0fb96bea81 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/security/delete_privileges.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/security/delete_privileges.rb @@ -24,13 +24,13 @@ module Security module Actions # Delete application privileges. # To use this API, you must have one of the following privileges: - # * The +manage_security+ cluster privilege (or a greater privilege such as +all+). + # * The `manage_security` cluster privilege (or a greater privilege such as `all`). # * The "Manage Application Privileges" global privilege for the application being referenced in the request. # # @option arguments [String] :application The name of the application. # Application privileges are always associated with exactly one application. (*Required*) # @option arguments [String, Array] :name The name of the privilege. (*Required*) - # @option arguments [String] :refresh If +true+ (the default) then refresh the affected shards to make this operation visible to search, if +wait_for+ then wait for a refresh to make this operation visible to search, if +false+ then do nothing with refreshes. + # @option arguments [String] :refresh If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. # @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-privileges diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/security/delete_role.rb b/elasticsearch-api/lib/elasticsearch/api/actions/security/delete_role.rb index b1d73aa58c..19f8bc44ef 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/security/delete_role.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/security/delete_role.rb @@ -28,7 +28,7 @@ module Actions # The delete roles API cannot remove roles that are defined in roles files. # # @option arguments [String] :name The name of the role. (*Required*) - # @option arguments [String] :refresh If +true+ (the default) then refresh the affected shards to make this operation visible to search, if +wait_for+ then wait for a refresh to make this operation visible to search, if +false+ then do nothing with refreshes. + # @option arguments [String] :refresh If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.
# @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-role diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/security/delete_role_mapping.rb b/elasticsearch-api/lib/elasticsearch/api/actions/security/delete_role_mapping.rb index b8a11535d0..b6bd17290e 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/security/delete_role_mapping.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/security/delete_role_mapping.rb @@ -29,7 +29,7 @@ module Actions # # @option arguments [String] :name The distinct name that identifies the role mapping. # The name is used solely as an identifier to facilitate interaction via the API; it does not affect the behavior of the mapping in any way. (*Required*) - # @option arguments [String] :refresh If +true+ (the default) then refresh the affected shards to make this operation visible to search, if +wait_for+ then wait for a refresh to make this operation visible to search, if +false+ then do nothing with refreshes. + # @option arguments [String] :refresh If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. # @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-role-mapping diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/security/delete_service_token.rb b/elasticsearch-api/lib/elasticsearch/api/actions/security/delete_service_token.rb index 67b74f02ad..33137aaa43 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/security/delete_service_token.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/security/delete_service_token.rb @@ -28,7 +28,7 @@ module Actions # @option arguments [String] :namespace The namespace, which is a top-level grouping of service accounts. (*Required*) # @option arguments [String] :service The service name. (*Required*) # @option arguments [String] :name The name of the service account token. (*Required*) - # @option arguments [String] :refresh If +true+ then refresh the affected shards to make this operation visible to search, if +wait_for+ (the default) then wait for a refresh to make this operation visible to search, if +false+ then do nothing with refreshes. + # @option arguments [String] :refresh If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` (the default) then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. # @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-service-token diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/security/delete_user.rb b/elasticsearch-api/lib/elasticsearch/api/actions/security/delete_user.rb index 614c4c6501..bf132dcb95 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/security/delete_user.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/security/delete_user.rb @@ -26,7 +26,7 @@ module Actions # Delete users from the native realm. # # @option arguments [String] :username An identifier for the user. 
(*Required*) - # @option arguments [String] :refresh If +true+ (the default) then refresh the affected shards to make this operation visible to search, if +wait_for+ then wait for a refresh to make this operation visible to search, if +false+ then do nothing with refreshes. + # @option arguments [String] :refresh If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. # @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-user diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/security/disable_user.rb b/elasticsearch-api/lib/elasticsearch/api/actions/security/disable_user.rb index 1111e9af70..b5f6dbc3a9 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/security/disable_user.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/security/disable_user.rb @@ -28,7 +28,7 @@ module Actions # You can use this API to revoke a user's access to Elasticsearch. # # @option arguments [String] :username An identifier for the user. (*Required*) - # @option arguments [String] :refresh If +true+ (the default) then refresh the affected shards to make this operation visible to search, if +wait_for+ then wait for a refresh to make this operation visible to search, if +false+ then do nothing with refreshes. + # @option arguments [String] :refresh If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. # @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-disable-user diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/security/enable_user.rb b/elasticsearch-api/lib/elasticsearch/api/actions/security/enable_user.rb index cf5fefa7da..1acea0dd87 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/security/enable_user.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/security/enable_user.rb @@ -27,7 +27,7 @@ module Actions # By default, when you create users, they are enabled. # # @option arguments [String] :username An identifier for the user. (*Required*) - # @option arguments [String] :refresh If +true+ (the default) then refresh the affected shards to make this operation visible to search, if +wait_for+ then wait for a refresh to make this operation visible to search, if +false+ then do nothing with refreshes. + # @option arguments [String] :refresh If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. # @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-enable-user diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/security/get_api_key.rb b/elasticsearch-api/lib/elasticsearch/api/actions/security/get_api_key.rb index 259c0643a8..b7eb9f1bda 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/security/get_api_key.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/security/get_api_key.rb @@ -24,25 +24,25 @@ module Security module Actions # Get API key information. 
# Retrieves information for one or more API keys. - # NOTE: If you have only the +manage_own_api_key+ privilege, this API returns only the API keys that you own. - # If you have +read_security+, +manage_api_key+ or greater privileges (including +manage_security+), this API returns all API keys regardless of ownership. + # NOTE: If you have only the `manage_own_api_key` privilege, this API returns only the API keys that you own. + # If you have `read_security`, `manage_api_key` or greater privileges (including `manage_security`), this API returns all API keys regardless of ownership. # # @option arguments [String] :id An API key id. - # This parameter cannot be used with any of +name+, +realm_name+ or +username+. + # This parameter cannot be used with any of `name`, `realm_name` or `username`. # @option arguments [String] :name An API key name. - # This parameter cannot be used with any of +id+, +realm_name+ or +username+. + # This parameter cannot be used with any of `id`, `realm_name` or `username`. # It supports prefix search with wildcard. # @option arguments [Boolean] :owner A boolean flag that can be used to query API keys owned by the currently authenticated user. - # The +realm_name+ or +username+ parameters cannot be specified when this parameter is set to +true+ as they are assumed to be the currently authenticated ones. + # The `realm_name` or `username` parameters cannot be specified when this parameter is set to `true` as they are assumed to be the currently authenticated ones. # @option arguments [String] :realm_name The name of an authentication realm. - # This parameter cannot be used with either +id+ or +name+ or when +owner+ flag is set to +true+. + # This parameter cannot be used with either `id` or `name` or when `owner` flag is set to `true`. # @option arguments [String] :username The username of a user. - # This parameter cannot be used with either +id+ or +name+ or when +owner+ flag is set to +true+. + # This parameter cannot be used with either `id` or `name` or when `owner` flag is set to `true`. # @option arguments [Boolean] :with_limited_by Return the snapshot of the owner user's role descriptors # associated with the API key. An API key's actual # permission is the intersection of its assigned role # descriptors and the owner user's role descriptors. - # @option arguments [Boolean] :active_only A boolean flag that can be used to query API keys that are currently active. An API key is considered active if it is neither invalidated, nor expired at query time. You can specify this together with other parameters such as +owner+ or +name+. If +active_only+ is false, the response will include both active and inactive (expired or invalidated) keys. + # @option arguments [Boolean] :active_only A boolean flag that can be used to query API keys that are currently active. An API key is considered active if it is neither invalidated, nor expired at query time. You can specify this together with other parameters such as `owner` or `name`. If `active_only` is false, the response will include both active and inactive (expired or invalidated) keys. # @option arguments [Boolean] :with_profile_uid Determines whether to also retrieve the profile uid, for the API key owner principal, if it exists. 
# @option arguments [Hash] :headers Custom HTTP headers # diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/security/get_privileges.rb b/elasticsearch-api/lib/elasticsearch/api/actions/security/get_privileges.rb index e592c8bf51..af7a7b1667 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/security/get_privileges.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/security/get_privileges.rb @@ -24,7 +24,7 @@ module Security module Actions # Get application privileges. # To use this API, you must have one of the following privileges: - # * The +read_security+ cluster privilege (or a greater privilege such as +manage_security+ or +all+). + # * The `read_security` cluster privilege (or a greater privilege such as `manage_security` or `all`). # * The "Manage Application Privileges" global privilege for the application being referenced in the request. # # @option arguments [String] :application The name of the application. diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/security/get_service_accounts.rb b/elasticsearch-api/lib/elasticsearch/api/actions/security/get_service_accounts.rb index 869557efd2..a19eb3835d 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/security/get_service_accounts.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/security/get_service_accounts.rb @@ -24,13 +24,13 @@ module Security module Actions # Get service accounts. # Get a list of service accounts that match the provided path parameters. - # NOTE: Currently, only the +elastic/fleet-server+ service account is available. + # NOTE: Currently, only the `elastic/fleet-server` service account is available. # # @option arguments [String] :namespace The name of the namespace. # Omit this parameter to retrieve information about all service accounts. - # If you omit this parameter, you must also omit the +service+ parameter. + # If you omit this parameter, you must also omit the `service` parameter. # @option arguments [String] :service The service name. - # Omit this parameter to retrieve information about all service accounts that belong to the specified +namespace+. + # Omit this parameter to retrieve information about all service accounts that belong to the specified `namespace`. # @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-service-accounts diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/security/get_service_credentials.rb b/elasticsearch-api/lib/elasticsearch/api/actions/security/get_service_credentials.rb index b3e62afdb7..291826d3a8 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/security/get_service_credentials.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/security/get_service_credentials.rb @@ -23,9 +23,9 @@ module API module Security module Actions # Get service account credentials. - # To use this API, you must have at least the +read_security+ cluster privilege (or a greater privilege such as +manage_service_account+ or +manage_security+). + # To use this API, you must have at least the `read_security` cluster privilege (or a greater privilege such as `manage_service_account` or `manage_security`). # The response includes service account tokens that were created with the create service account tokens API as well as file-backed tokens from all nodes of the cluster. - # NOTE: For tokens backed by the +service_tokens+ file, the API collects them from all nodes of the cluster. 
+ # NOTE: For tokens backed by the `service_tokens` file, the API collects them from all nodes of the cluster. # Tokens with the same name from different nodes are assumed to be the same token and are only counted once towards the total number of service tokens. # # @option arguments [String] :namespace The name of the namespace. (*Required*) diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/security/get_settings.rb b/elasticsearch-api/lib/elasticsearch/api/actions/security/get_settings.rb index 3f880b7671..95e4040ef3 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/security/get_settings.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/security/get_settings.rb @@ -23,11 +23,11 @@ module API module Security module Actions # Get security index settings. - # Get the user-configurable settings for the security internal index (+.security+ and associated indices). + # Get the user-configurable settings for the security internal index (`.security` and associated indices). # Only a subset of the index settings — those that are user-configurable—will be shown. # This includes: - # * +index.auto_expand_replicas+ - # * +index.number_of_replicas+ + # * `index.auto_expand_replicas` + # * `index.number_of_replicas` # # @option arguments [Time] :master_timeout Period to wait for a connection to the master node. # If no response is received before the timeout expires, the request fails and returns an error. diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/security/get_token.rb b/elasticsearch-api/lib/elasticsearch/api/actions/security/get_token.rb index 18ec4c54fd..b6afbd208c 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/security/get_token.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/security/get_token.rb @@ -25,12 +25,12 @@ module Actions # Get a token. # Create a bearer token for access without requiring basic authentication. # The tokens are created by the Elasticsearch Token Service, which is automatically enabled when you configure TLS on the HTTP interface. - # Alternatively, you can explicitly enable the +xpack.security.authc.token.enabled+ setting. + # Alternatively, you can explicitly enable the `xpack.security.authc.token.enabled` setting. # When you are running in production mode, a bootstrap check prevents you from enabling the token service unless you also enable TLS on the HTTP interface. # The get token API takes the same parameters as a typical OAuth 2.0 token API except for the use of a JSON request body. # A successful get token API call returns a JSON structure that contains the access token, the amount of time (seconds) that the token expires in, the type, and the scope if available. # The tokens returned by the get token API have a finite period of time for which they are valid and after that time period, they can no longer be used. - # That time period is defined by the +xpack.security.authc.token.timeout+ setting. + # That time period is defined by the `xpack.security.authc.token.timeout` setting. # If you want to invalidate a token immediately, you can do so by using the invalidate token API. 
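As a rough illustration of the token flow documented in the hunk above, here is a minimal sketch using this client; the connection details and credentials are hypothetical and are not part of this patch:

```
require 'elasticsearch'

# Hypothetical connection details and credentials.
client = Elasticsearch::Client.new(url: 'https://localhost:9200', api_key: 'REDACTED')

# Exchange a username and password for a bearer token.
response = client.security.get_token(
  body: { grant_type: 'password', username: 'example_user', password: 'example_password' }
)
bearer_token = response['access_token']

# Tokens expire per xpack.security.authc.token.timeout; to revoke one immediately,
# pass it to the invalidate token API.
client.security.invalidate_token(body: { token: bearer_token })
```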
# # @option arguments [Hash] :headers Custom HTTP headers diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/security/get_user_profile.rb b/elasticsearch-api/lib/elasticsearch/api/actions/security/get_user_profile.rb index 5ff39cbae5..633ebcec57 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/security/get_user_profile.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/security/get_user_profile.rb @@ -29,10 +29,10 @@ module Actions # Elastic reserves the right to change or remove this feature in future releases without prior notice. # # @option arguments [Userprofileid] :uid A unique identifier for the user profile. (*Required*) - # @option arguments [String] :data A comma-separated list of filters for the +data+ field of the profile document. - # To return all content use +data=*+. - # To return a subset of content use +data=+ to retrieve content nested under the specified ++. - # By default returns no +data+ content. + # @option arguments [String] :data A comma-separated list of filters for the `data` field of the profile document. + # To return all content use `data=*`. + # To return a subset of content use `data=` to retrieve content nested under the specified ``. + # By default returns no `data` content. # @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-user-profile diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/security/invalidate_api_key.rb b/elasticsearch-api/lib/elasticsearch/api/actions/security/invalidate_api_key.rb index 951f2ca7a0..7bbebe8712 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/security/invalidate_api_key.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/security/invalidate_api_key.rb @@ -25,14 +25,14 @@ module Actions # Invalidate API keys. # This API invalidates API keys created by the create API key or grant API key APIs. # Invalidated API keys fail authentication, but they can still be viewed using the get API key information and query API key information APIs, for at least the configured retention period, until they are automatically deleted. - # To use this API, you must have at least the +manage_security+, +manage_api_key+, or +manage_own_api_key+ cluster privileges. - # The +manage_security+ privilege allows deleting any API key, including both REST and cross cluster API keys. - # The +manage_api_key+ privilege allows deleting any REST API key, but not cross cluster API keys. - # The +manage_own_api_key+ only allows deleting REST API keys that are owned by the user. - # In addition, with the +manage_own_api_key+ privilege, an invalidation request must be issued in one of the three formats: - # - Set the parameter +owner=true+. - # - Or, set both +username+ and +realm_name+ to match the user's identity. - # - Or, if the request is issued by an API key, that is to say an API key invalidates itself, specify its ID in the +ids+ field. + # To use this API, you must have at least the `manage_security`, `manage_api_key`, or `manage_own_api_key` cluster privileges. + # The `manage_security` privilege allows deleting any API key, including both REST and cross cluster API keys. + # The `manage_api_key` privilege allows deleting any REST API key, but not cross cluster API keys. + # The `manage_own_api_key` only allows deleting REST API keys that are owned by the user. 
+ # In addition, with the `manage_own_api_key` privilege, an invalidation request must be issued in one of the three formats: + # - Set the parameter `owner=true`. + # - Or, set both `username` and `realm_name` to match the user's identity. + # - Or, if the request is issued by an API key, that is to say an API key invalidates itself, specify its ID in the `ids` field. # # @option arguments [Hash] :headers Custom HTTP headers # @option arguments [Hash] :body request body diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/security/invalidate_token.rb b/elasticsearch-api/lib/elasticsearch/api/actions/security/invalidate_token.rb index a10a433d20..e581ddf60c 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/security/invalidate_token.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/security/invalidate_token.rb @@ -25,13 +25,13 @@ module Actions # Invalidate a token. # The access tokens returned by the get token API have a finite period of time for which they are valid. # After that time period, they can no longer be used. - # The time period is defined by the +xpack.security.authc.token.timeout+ setting. + # The time period is defined by the `xpack.security.authc.token.timeout` setting. # The refresh tokens returned by the get token API are only valid for 24 hours. # They can also be used exactly once. # If you want to invalidate one or more access or refresh tokens immediately, use this invalidate token API. # NOTE: While all parameters are optional, at least one of them is required. - # More specifically, either one of +token+ or +refresh_token+ parameters is required. - # If none of these two are specified, then +realm_name+ and/or +username+ need to be specified. + # More specifically, either one of `token` or `refresh_token` parameters is required. + # If none of these two are specified, then `realm_name` and/or `username` need to be specified. # # @option arguments [Hash] :headers Custom HTTP headers # @option arguments [Hash] :body request body diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/security/oidc_logout.rb b/elasticsearch-api/lib/elasticsearch/api/actions/security/oidc_logout.rb index e0faf9bbfb..2182a67738 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/security/oidc_logout.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/security/oidc_logout.rb @@ -23,7 +23,7 @@ module API module Security module Actions # Logout of OpenID Connect. - # Invalidate an access token and a refresh token that were generated as a response to the +/_security/oidc/authenticate+ API. + # Invalidate an access token and a refresh token that were generated as a response to the `/_security/oidc/authenticate` API. # If the OpenID Connect authentication realm in Elasticsearch is accordingly configured, the response to this call will contain a URI pointing to the end session endpoint of the OpenID Connect Provider in order to perform single logout. # Elasticsearch exposes all the necessary OpenID Connect related functionality with the OpenID Connect APIs. # These APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but can also be used by other, custom web applications or other clients. 
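To make the three invalidation request formats listed above concrete, a minimal sketch with this client follows; the connection details, user, realm, and key ID are hypothetical:

```
require 'elasticsearch'

# Hypothetical connection details.
client = Elasticsearch::Client.new(url: 'https://localhost:9200', api_key: 'REDACTED')

# 1. Invalidate only the API keys owned by the calling user.
client.security.invalidate_api_key(body: { owner: true })

# 2. Match the user's identity explicitly with username and realm_name.
client.security.invalidate_api_key(body: { username: 'example_user', realm_name: 'native1' })

# 3. An API key invalidating itself by listing its own ID in the ids field.
client.security.invalidate_api_key(body: { ids: ['example-api-key-id'] })
```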
diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/security/put_privileges.rb b/elasticsearch-api/lib/elasticsearch/api/actions/security/put_privileges.rb index fc6c9765ab..be14427e18 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/security/put_privileges.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/security/put_privileges.rb @@ -24,19 +24,19 @@ module Security module Actions # Create or update application privileges. # To use this API, you must have one of the following privileges: - # * The +manage_security+ cluster privilege (or a greater privilege such as +all+). + # * The `manage_security` cluster privilege (or a greater privilege such as `all`). # * The "Manage Application Privileges" global privilege for the application being referenced in the request. # Application names are formed from a prefix, with an optional suffix that conform to the following rules: # * The prefix must begin with a lowercase ASCII letter. # * The prefix must contain only ASCII letters or digits. # * The prefix must be at least 3 characters long. - # * If the suffix exists, it must begin with either a dash +-+ or +_+. - # * The suffix cannot contain any of the following characters: +\+, +/+, +*+, +?+, +"+, +<+, +>+, +|+, +,+, +*+. + # * If the suffix exists, it must begin with either a dash `-` or `_`. + # * The suffix cannot contain any of the following characters: `\`, `/`, `*`, `?`, `"`, `<`, `>`, `|`, `,`, `*`. # * No part of the name can contain whitespace. - # Privilege names must begin with a lowercase ASCII letter and must contain only ASCII letters and digits along with the characters +_+, +-+, and +.+. - # Action names can contain any number of printable ASCII characters and must contain at least one of the following characters: +/+, +*+, +:+. + # Privilege names must begin with a lowercase ASCII letter and must contain only ASCII letters and digits along with the characters `_`, `-`, and `.`. + # Action names can contain any number of printable ASCII characters and must contain at least one of the following characters: `/`, `*`, `:`. # - # @option arguments [String] :refresh If +true+ (the default) then refresh the affected shards to make this operation visible to search, if +wait_for+ then wait for a refresh to make this operation visible to search, if +false+ then do nothing with refreshes. + # @option arguments [String] :refresh If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. # @option arguments [Hash] :headers Custom HTTP headers # @option arguments [Hash] :body privileges # diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/security/put_role.rb b/elasticsearch-api/lib/elasticsearch/api/actions/security/put_role.rb index 70bddd7c69..ca90ad992e 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/security/put_role.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/security/put_role.rb @@ -28,7 +28,7 @@ module Actions # File-based role management is not available in Elastic Serverless. # # @option arguments [String] :name The name of the role that is being created or updated. On Elasticsearch Serverless, the role name must begin with a letter or digit and can only contain letters, digits and the characters '_', '-', and '.'. Each role must have a unique name, as this will serve as the identifier for that role. 
(*Required*) - # @option arguments [String] :refresh If +true+ (the default) then refresh the affected shards to make this operation visible to search, if +wait_for+ then wait for a refresh to make this operation visible to search, if +false+ then do nothing with refreshes. + # @option arguments [String] :refresh If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. # @option arguments [Hash] :headers Custom HTTP headers # @option arguments [Hash] :body request body # diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/security/put_role_mapping.rb b/elasticsearch-api/lib/elasticsearch/api/actions/security/put_role_mapping.rb index 7e6c1f3bf3..0bc29c352b 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/security/put_role_mapping.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/security/put_role_mapping.rb @@ -30,10 +30,10 @@ module Actions # Roles can be created by using the create or update roles API or roles files. # **Role templates** # The most common use for role mappings is to create a mapping from a known value on the user to a fixed role name. - # For example, all users in the +cn=admin,dc=example,dc=com+ LDAP group should be given the superuser role in Elasticsearch. - # The +roles+ field is used for this purpose. + # For example, all users in the `cn=admin,dc=example,dc=com` LDAP group should be given the superuser role in Elasticsearch. + # The `roles` field is used for this purpose. # For more complex needs, it is possible to use Mustache templates to dynamically determine the names of the roles that should be granted to the user. - # The +role_templates+ field is used for this purpose. + # The `role_templates` field is used for this purpose. # NOTE: To use role templates successfully, the relevant scripting feature must be enabled. # Otherwise, all attempts to create a role mapping with role templates fail. # All of the user fields that are available in the role mapping rules are also available in the role templates. @@ -43,7 +43,7 @@ module Actions # # @option arguments [String] :name The distinct name that identifies the role mapping. # The name is used solely as an identifier to facilitate interaction via the API; it does not affect the behavior of the mapping in any way. (*Required*) - # @option arguments [String] :refresh If +true+ (the default) then refresh the affected shards to make this operation visible to search, if +wait_for+ then wait for a refresh to make this operation visible to search, if +false+ then do nothing with refreshes. + # @option arguments [String] :refresh If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. 
# @option arguments [Hash] :headers Custom HTTP headers # @option arguments [Hash] :body request body # diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/security/put_user.rb b/elasticsearch-api/lib/elasticsearch/api/actions/security/put_user.rb index a525745c97..a5622977f1 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/security/put_user.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/security/put_user.rb @@ -30,7 +30,7 @@ module Actions # @option arguments [String] :username An identifier for the user.NOTE: Usernames must be at least 1 and no more than 507 characters. # They can contain alphanumeric characters (a-z, A-Z, 0-9), spaces, punctuation, and printable symbols in the Basic Latin (ASCII) block. # Leading or trailing whitespace is not allowed. (*Required*) - # @option arguments [String] :refresh Valid values are +true+, +false+, and +wait_for+. + # @option arguments [String] :refresh Valid values are `true`, `false`, and `wait_for`. # These values have the same meaning as in the index API, but the default value for this API is true. Server default: true. # @option arguments [Hash] :headers Custom HTTP headers # @option arguments [Hash] :body request body diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/security/query_api_keys.rb b/elasticsearch-api/lib/elasticsearch/api/actions/security/query_api_keys.rb index 19812f21cd..3208457cef 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/security/query_api_keys.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/security/query_api_keys.rb @@ -25,15 +25,15 @@ module Actions # Find API keys with a query. # Get a paginated list of API keys and their information. # You can optionally filter the results with a query. - # To use this API, you must have at least the +manage_own_api_key+ or the +read_security+ cluster privileges. - # If you have only the +manage_own_api_key+ privilege, this API returns only the API keys that you own. - # If you have the +read_security+, +manage_api_key+, or greater privileges (including +manage_security+), this API returns all API keys regardless of ownership. + # To use this API, you must have at least the `manage_own_api_key` or the `read_security` cluster privileges. + # If you have only the `manage_own_api_key` privilege, this API returns only the API keys that you own. + # If you have the `read_security`, `manage_api_key`, or greater privileges (including `manage_security`), this API returns all API keys regardless of ownership. # # @option arguments [Boolean] :with_limited_by Return the snapshot of the owner user's role descriptors associated with the API key. # An API key's actual permission is the intersection of its assigned role descriptors and the owner user's role descriptors (effectively limited by it). - # An API key cannot retrieve any API key’s limited-by role descriptors (including itself) unless it has +manage_api_key+ or higher privileges. + # An API key cannot retrieve any API key’s limited-by role descriptors (including itself) unless it has `manage_api_key` or higher privileges. # @option arguments [Boolean] :with_profile_uid Determines whether to also retrieve the profile UID for the API key owner principal. - # If it exists, the profile UID is returned under the +profile_uid+ response field for each API key. + # If it exists, the profile UID is returned under the `profile_uid` response field for each API key. 
# @option arguments [Boolean] :typed_keys Determines whether aggregation names are prefixed by their respective types in the response. # @option arguments [Hash] :headers Custom HTTP headers # @option arguments [Hash] :body request body diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/security/saml_authenticate.rb b/elasticsearch-api/lib/elasticsearch/api/actions/security/saml_authenticate.rb index 4bdbef1763..d632e9b101 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/security/saml_authenticate.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/security/saml_authenticate.rb @@ -29,7 +29,7 @@ module Actions # The SAML message that is submitted can be: # * A response to a SAML authentication request that was previously created using the SAML prepare authentication API. # * An unsolicited SAML message in the case of an IdP-initiated single sign-on (SSO) flow. - # In either case, the SAML message needs to be a base64 encoded XML document with a root element of ++. + # In either case, the SAML message needs to be a base64 encoded XML document with a root element of ``. # After successful validation, Elasticsearch responds with an Elasticsearch internal access token and refresh token that can be subsequently used for authentication. # This API endpoint essentially exchanges SAML responses that indicate successful authentication in the IdP for Elasticsearch access and refresh tokens, which can be used for authentication against Elasticsearch. # diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/security/saml_invalidate.rb b/elasticsearch-api/lib/elasticsearch/api/actions/security/saml_invalidate.rb index 5a80c1adcd..b0ec1d1ebe 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/security/saml_invalidate.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/security/saml_invalidate.rb @@ -27,7 +27,7 @@ module Actions # NOTE: This API is intended for use by custom web applications other than Kibana. # If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack. # The logout request comes from the SAML IdP during an IdP initiated Single Logout. - # The custom web application can use this API to have Elasticsearch process the +LogoutRequest+. + # The custom web application can use this API to have Elasticsearch process the `LogoutRequest`. # After successful validation of the request, Elasticsearch invalidates the access token and refresh token that corresponds to that specific SAML principal and provides a URL that contains a SAML LogoutResponse message. # Thus the user can be redirected back to their IdP. # diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/security/saml_prepare_authentication.rb b/elasticsearch-api/lib/elasticsearch/api/actions/security/saml_prepare_authentication.rb index 5a634cbae0..7bb0ec405e 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/security/saml_prepare_authentication.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/security/saml_prepare_authentication.rb @@ -23,13 +23,13 @@ module API module Security module Actions # Prepare SAML authentication. - # Create a SAML authentication request (++) as a URL string based on the configuration of the respective SAML realm in Elasticsearch. + # Create a SAML authentication request (``) as a URL string based on the configuration of the respective SAML realm in Elasticsearch. # NOTE: This API is intended for use by custom web applications other than Kibana. 
# If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack. # This API returns a URL pointing to the SAML Identity Provider. # You can use the URL to redirect the browser of the user in order to continue the authentication process. - # The URL includes a single parameter named +SAMLRequest+, which contains a SAML Authentication request that is deflated and Base64 encoded. - # If the configuration dictates that SAML authentication requests should be signed, the URL has two extra parameters named +SigAlg+ and +Signature+. + # The URL includes a single parameter named `SAMLRequest`, which contains a SAML Authentication request that is deflated and Base64 encoded. + # If the configuration dictates that SAML authentication requests should be signed, the URL has two extra parameters named `SigAlg` and `Signature`. # These parameters contain the algorithm used for the signature and the signature value itself. # It also returns a random string that uniquely identifies this SAML Authentication request. # The caller of this API needs to store this identifier as it needs to be used in a following step of the authentication process. diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/security/suggest_user_profiles.rb b/elasticsearch-api/lib/elasticsearch/api/actions/security/suggest_user_profiles.rb index 87e497b3c2..d94e384ab4 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/security/suggest_user_profiles.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/security/suggest_user_profiles.rb @@ -28,11 +28,11 @@ module Actions # Individual users and external applications should not call this API directly. # Elastic reserves the right to change or remove this feature in future releases without prior notice. # - # @option arguments [String] :data A comma-separated list of filters for the +data+ field of the profile document. - # To return all content use +data=*+. - # To return a subset of content, use +data=+ to retrieve content nested under the specified ++. - # By default, the API returns no +data+ content. - # It is an error to specify +data+ as both the query parameter and the request body field. + # @option arguments [String] :data A comma-separated list of filters for the `data` field of the profile document. + # To return all content use `data=*`. + # To return a subset of content, use `data=` to retrieve content nested under the specified ``. + # By default, the API returns no `data` content. + # It is an error to specify `data` as both the query parameter and the request body field. # @option arguments [Hash] :headers Custom HTTP headers # @option arguments [Hash] :body request body # diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/security/update_api_key.rb b/elasticsearch-api/lib/elasticsearch/api/actions/security/update_api_key.rb index b272f66cd5..b671775c45 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/security/update_api_key.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/security/update_api_key.rb @@ -25,16 +25,16 @@ module Actions # Update an API key. # Update attributes of an existing API key. # This API supports updates to an API key's access scope, expiration, and metadata. - # To use this API, you must have at least the +manage_own_api_key+ cluster privilege. + # To use this API, you must have at least the `manage_own_api_key` cluster privilege. # Users can only update API keys that they created or that were granted to them. 
- # To update another user’s API key, use the +run_as+ feature to submit a request on behalf of another user. + # To update another user’s API key, use the `run_as` feature to submit a request on behalf of another user. # IMPORTANT: It's not possible to use an API key as the authentication credential for this API. The owner user’s credentials are required. # Use this API to update API keys created by the create API key or grant API Key APIs. # If you need to apply the same update to many API keys, you can use the bulk update API keys API to reduce overhead. # It's not possible to update expired API keys or API keys that have been invalidated by the invalidate API key API. - # The access scope of an API key is derived from the +role_descriptors+ you specify in the request and a snapshot of the owner user's permissions at the time of the request. + # The access scope of an API key is derived from the `role_descriptors` you specify in the request and a snapshot of the owner user's permissions at the time of the request. # The snapshot of the owner's permissions is updated automatically on every call. - # IMPORTANT: If you don't specify +role_descriptors+ in the request, a call to this API might still change the API key's access scope. + # IMPORTANT: If you don't specify `role_descriptors` in the request, a call to this API might still change the API key's access scope. # This change can occur if the owner user's permissions have changed since the API key was created or last modified. # # @option arguments [String] :id The ID of the API key to update. (*Required*) diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/security/update_cross_cluster_api_key.rb b/elasticsearch-api/lib/elasticsearch/api/actions/security/update_cross_cluster_api_key.rb index ee6ff96b8c..0fe7b04748 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/security/update_cross_cluster_api_key.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/security/update_cross_cluster_api_key.rb @@ -24,14 +24,14 @@ module Security module Actions # Update a cross-cluster API key. # Update the attributes of an existing cross-cluster API key, which is used for API key based remote cluster access. - # To use this API, you must have at least the +manage_security+ cluster privilege. + # To use this API, you must have at least the `manage_security` cluster privilege. # Users can only update API keys that they created. - # To update another user's API key, use the +run_as+ feature to submit a request on behalf of another user. + # To update another user's API key, use the `run_as` feature to submit a request on behalf of another user. # IMPORTANT: It's not possible to use an API key as the authentication credential for this API. # To update an API key, the owner user's credentials are required. # It's not possible to update expired API keys, or API keys that have been invalidated by the invalidate API key API. # This API supports updates to an API key's access scope, metadata, and expiration. - # The owner user's information, such as the +username+ and +realm+, is also updated automatically on every call. + # The owner user's information, such as the `username` and `realm`, is also updated automatically on every call. # NOTE: This API cannot update REST API keys, which should be updated by either the update API key or bulk update API keys API. # # @option arguments [String] :id The ID of the cross-cluster API key to update. 
(*Required*) diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/security/update_settings.rb b/elasticsearch-api/lib/elasticsearch/api/actions/security/update_settings.rb index 5d26dbb8f3..5d71f50a77 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/security/update_settings.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/security/update_settings.rb @@ -23,8 +23,8 @@ module API module Security module Actions # Update security index settings. - # Update the user-configurable settings for the security internal index (+.security+ and associated indices). Only a subset of settings are allowed to be modified. This includes +index.auto_expand_replicas+ and +index.number_of_replicas+. - # NOTE: If +index.auto_expand_replicas+ is set, +index.number_of_replicas+ will be ignored during updates. + # Update the user-configurable settings for the security internal index (`.security` and associated indices). Only a subset of settings are allowed to be modified. This includes `index.auto_expand_replicas` and `index.number_of_replicas`. + # NOTE: If `index.auto_expand_replicas` is set, `index.number_of_replicas` will be ignored during updates. # If a specific index is not in use on the system and settings are provided for it, the request will be rejected. # This API does not yet support configuring the settings for indices before they are in use. # diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/security/update_user_profile_data.rb b/elasticsearch-api/lib/elasticsearch/api/actions/security/update_user_profile_data.rb index 4344974745..50c5a3d174 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/security/update_user_profile_data.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/security/update_user_profile_data.rb @@ -28,12 +28,12 @@ module Actions # Individual users and external applications should not call this API directly. # Elastic reserves the right to change or remove this feature in future releases without prior notice. # To use this API, you must have one of the following privileges: - # * The +manage_user_profile+ cluster privilege. - # * The +update_profile_data+ global privilege for the namespaces that are referenced in the request. - # This API updates the +labels+ and +data+ fields of an existing user profile document with JSON objects. + # * The `manage_user_profile` cluster privilege. + # * The `update_profile_data` global privilege for the namespaces that are referenced in the request. + # This API updates the `labels` and `data` fields of an existing user profile document with JSON objects. # New keys and their values are added to the profile document and conflicting keys are replaced by data that's included in the request. # For both labels and data, content is namespaced by the top-level fields. - # The +update_profile_data+ global privilege grants privileges for updating only the allowed namespaces. + # The `update_profile_data` global privilege grants privileges for updating only the allowed namespaces. # # @option arguments [String] :uid A unique identifier for the user profile. (*Required*) # @option arguments [Integer] :if_seq_no Only perform the operation if the document has this sequence number. 
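Because the update user profile data hunk above describes how the `labels` and `data` fields are merged, here is a minimal sketch of such a call with this client; the profile UID, namespace keys, and connection details are hypothetical:

```
require 'elasticsearch'

# Hypothetical connection details and profile UID.
client = Elasticsearch::Client.new(url: 'https://localhost:9200', api_key: 'REDACTED')

# New keys are added to the profile document; conflicting keys are replaced.
# Content is namespaced by the top-level fields ('app1' here).
client.security.update_user_profile_data(
  uid: 'u_example_profile_uid',
  body: {
    labels: { app1: { tags: ['beta'] } },
    data:   { app1: { theme: 'dark' } }
  }
)
```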
diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/snapshot/cleanup_repository.rb b/elasticsearch-api/lib/elasticsearch/api/actions/snapshot/cleanup_repository.rb index d1dcfb8519..71dbdde345 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/snapshot/cleanup_repository.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/snapshot/cleanup_repository.rb @@ -28,10 +28,10 @@ module Actions # @option arguments [String] :repository The name of the snapshot repository to clean up. (*Required*) # @option arguments [Time] :master_timeout The period to wait for a connection to the master node. # If the master node is not available before the timeout expires, the request fails and returns an error. - # To indicate that the request should never timeout, set it to +-1+ Server default: 30s. + # To indicate that the request should never timeout, set it to `-1` Server default: 30s. # @option arguments [Time] :timeout The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. # If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. - # To indicate that the request should never timeout, set it to +-1+. Server default: 30s. + # To indicate that the request should never timeout, set it to `-1`. Server default: 30s. # @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-cleanup-repository diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/snapshot/clone.rb b/elasticsearch-api/lib/elasticsearch/api/actions/snapshot/clone.rb index 6774ae513f..5483585dcb 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/snapshot/clone.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/snapshot/clone.rb @@ -30,7 +30,7 @@ module Actions # @option arguments [String] :target_snapshot The target snapshot name. (*Required*) # @option arguments [Time] :master_timeout The period to wait for the master node. # If the master node is not available before the timeout expires, the request fails and returns an error. - # To indicate that the request should never timeout, set it to +-1+. Server default: 30s. + # To indicate that the request should never timeout, set it to `-1`. Server default: 30s. # @option arguments [Hash] :headers Custom HTTP headers # @option arguments [Hash] :body request body # diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/snapshot/create.rb b/elasticsearch-api/lib/elasticsearch/api/actions/snapshot/create.rb index 2a33ff7284..5bdd548729 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/snapshot/create.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/snapshot/create.rb @@ -31,8 +31,8 @@ module Actions # It must be unique in the repository. (*Required*) # @option arguments [Time] :master_timeout The period to wait for a connection to the master node. # If no response is received before the timeout expires, the request fails and returns an error. Server default: 30s. - # @option arguments [Boolean] :wait_for_completion If +true+, the request returns a response when the snapshot is complete. - # If +false+, the request returns a response when the snapshot initializes. + # @option arguments [Boolean] :wait_for_completion If `true`, the request returns a response when the snapshot is complete. + # If `false`, the request returns a response when the snapshot initializes. 
# @option arguments [Hash] :headers Custom HTTP headers # @option arguments [Hash] :body request body # diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/snapshot/create_repository.rb b/elasticsearch-api/lib/elasticsearch/api/actions/snapshot/create_repository.rb index fa31b46cc2..eb1d62f354 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/snapshot/create_repository.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/snapshot/create_repository.rb @@ -25,19 +25,19 @@ module Actions # Create or update a snapshot repository. # IMPORTANT: If you are migrating searchable snapshots, the repository name must be identical in the source and destination clusters. # To register a snapshot repository, the cluster's global metadata must be writeable. - # Ensure there are no cluster blocks (for example, +cluster.blocks.read_only+ and +clsuter.blocks.read_only_allow_delete+ settings) that prevent write access. + # Ensure there are no cluster blocks (for example, `cluster.blocks.read_only` and `cluster.blocks.read_only_allow_delete` settings) that prevent write access. # Several options for this API can be specified using a query parameter or a request body parameter. # If both parameters are specified, only the query parameter is used. # # @option arguments [String] :repository The name of the snapshot repository to register or update. (*Required*) # @option arguments [Time] :master_timeout The period to wait for the master node. # If the master node is not available before the timeout expires, the request fails and returns an error. - # To indicate that the request should never timeout, set it to +-1+. Server default: 30s. + # To indicate that the request should never timeout, set it to `-1`. Server default: 30s. # @option arguments [Time] :timeout The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. # If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. - # To indicate that the request should never timeout, set it to +-1+. Server default: 30s. - # @option arguments [Boolean] :verify If +true+, the request verifies the repository is functional on all master and data nodes in the cluster. - # If +false+, this verification is skipped. + # To indicate that the request should never timeout, set it to `-1`. Server default: 30s. + # @option arguments [Boolean] :verify If `true`, the request verifies the repository is functional on all master and data nodes in the cluster. + # If `false`, this verification is skipped. # You can also perform this verification with the verify snapshot repository API. Server default: true. # @option arguments [Hash] :headers Custom HTTP headers # @option arguments [Hash] :body repository diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/snapshot/delete.rb b/elasticsearch-api/lib/elasticsearch/api/actions/snapshot/delete.rb index 1f47f0c2ae..02a8d45ba1 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/snapshot/delete.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/snapshot/delete.rb @@ -26,10 +26,10 @@ module Actions # # @option arguments [String] :repository The name of the repository to delete a snapshot from. (*Required*) # @option arguments [String] :snapshot A comma-separated list of snapshot names to delete. - # It also accepts wildcards (+*+). (*Required*) + # It also accepts wildcards (`*`).
(*Required*) # @option arguments [Time] :master_timeout The period to wait for the master node. # If the master node is not available before the timeout expires, the request fails and returns an error. - # To indicate that the request should never timeout, set it to +-1+. Server default: 30s. + # To indicate that the request should never timeout, set it to `-1`. Server default: 30s. # @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-delete diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/snapshot/delete_repository.rb b/elasticsearch-api/lib/elasticsearch/api/actions/snapshot/delete_repository.rb index 86859d8018..c052876319 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/snapshot/delete_repository.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/snapshot/delete_repository.rb @@ -27,13 +27,13 @@ module Actions # The snapshots themselves are left untouched and in place. # # @option arguments [String, Array] :repository The name of the snapshot repositories to unregister. - # Wildcard (+*+) patterns are supported. (*Required*) + # Wildcard (`*`) patterns are supported. (*Required*) # @option arguments [Time] :master_timeout The period to wait for the master node. # If the master node is not available before the timeout expires, the request fails and returns an error. - # To indicate that the request should never timeout, set it to +-1+. Server default: 30s. + # To indicate that the request should never timeout, set it to `-1`. Server default: 30s. # @option arguments [Time] :timeout The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. # If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. - # To indicate that the request should never timeout, set it to +-1+. Server default: 30s. + # To indicate that the request should never timeout, set it to `-1`. Server default: 30s. # @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-delete-repository diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/snapshot/get.rb b/elasticsearch-api/lib/elasticsearch/api/actions/snapshot/get.rb index 2764523ef5..3e4d2a5848 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/snapshot/get.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/snapshot/get.rb @@ -23,40 +23,40 @@ module API module Snapshot module Actions # Get snapshot information. - # NOTE: The +after+ parameter and +next+ field enable you to iterate through snapshots with some consistency guarantees regarding concurrent creation or deletion of snapshots. + # NOTE: The `after` parameter and `next` field enable you to iterate through snapshots with some consistency guarantees regarding concurrent creation or deletion of snapshots. # It is guaranteed that any snapshot that exists at the beginning of the iteration and is not concurrently deleted will be seen during the iteration. # Snapshots concurrently created may be seen during an iteration. # # @option arguments [String] :repository A comma-separated list of snapshot repository names used to limit the request. - # Wildcard (+*+) expressions are supported.
(*Required*) # @option arguments [String, Array] :snapshot A comma-separated list of snapshot names to retrieve - # Wildcards (+*+) are supported. - # - To get information about all snapshots in a registered repository, use a wildcard (+*+) or +_all+. - # - To get information about any snapshots that are currently running, use +_current+. (*Required*) + # Wildcards (`*`) are supported. + # - To get information about all snapshots in a registered repository, use a wildcard (`*`) or `_all`. + # - To get information about any snapshots that are currently running, use `_current`. (*Required*) # @option arguments [String] :after An offset identifier to start pagination from as returned by the next field in the response body. # @option arguments [String] :from_sort_value The value of the current sort column at which to start retrieval. - # It can be a string +snapshot-+ or a repository name when sorting by snapshot or repository name. - # It can be a millisecond time value or a number when sorting by +index-+ or shard count. - # @option arguments [Boolean] :ignore_unavailable If +false+, the request returns an error for any snapshots that are unavailable. - # @option arguments [Boolean] :index_details If +true+, the response includes additional information about each index in the snapshot comprising the number of shards in the index, the total size of the index in bytes, and the maximum number of segments per shard in the index. - # The default is +false+, meaning that this information is omitted. - # @option arguments [Boolean] :index_names If +true+, the response includes the name of each index in each snapshot. Server default: true. - # @option arguments [Boolean] :include_repository If +true+, the response includes the repository name in each snapshot. Server default: true. + # It can be a string `snapshot-` or a repository name when sorting by snapshot or repository name. + # It can be a millisecond time value or a number when sorting by `index-` or shard count. + # @option arguments [Boolean] :ignore_unavailable If `false`, the request returns an error for any snapshots that are unavailable. + # @option arguments [Boolean] :index_details If `true`, the response includes additional information about each index in the snapshot comprising the number of shards in the index, the total size of the index in bytes, and the maximum number of segments per shard in the index. + # The default is `false`, meaning that this information is omitted. + # @option arguments [Boolean] :index_names If `true`, the response includes the name of each index in each snapshot. Server default: true. + # @option arguments [Boolean] :include_repository If `true`, the response includes the repository name in each snapshot. Server default: true. # @option arguments [Time] :master_timeout The period to wait for a connection to the master node. # If no response is received before the timeout expires, the request fails and returns an error. Server default: 30s. # @option arguments [String] :order The sort order. - # Valid values are +asc+ for ascending and +desc+ for descending order. + # Valid values are `asc` for ascending and `desc` for descending order. # The default behavior is ascending order. Server default: asc. # @option arguments [Integer] :offset Numeric offset to start pagination from based on the snapshots matching this request. Using a non-zero value for this parameter is mutually exclusive with using the after parameter. Defaults to 0. Server default: 0. 
# @option arguments [Integer] :size The maximum number of snapshots to return. # The default is 0, which means to return all that match the request without limit. Server default: 0. - # @option arguments [String] :slm_policy_filter Filter snapshots by a comma-separated list of snapshot lifecycle management (SLM) policy names that snapshots belong to.You can use wildcards (+*+) and combinations of wildcards followed by exclude patterns starting with +-+. - # For example, the pattern +*,-policy-a-\*+ will return all snapshots except for those that were created by an SLM policy with a name starting with +policy-a-+. - # Note that the wildcard pattern +*+ matches all snapshots created by an SLM policy but not those snapshots that were not created by an SLM policy. - # To include snapshots that were not created by an SLM policy, you can use the special pattern +_none+ that will match all snapshots without an SLM policy. + # @option arguments [String] :slm_policy_filter Filter snapshots by a comma-separated list of snapshot lifecycle management (SLM) policy names that snapshots belong to.You can use wildcards (`*`) and combinations of wildcards followed by exclude patterns starting with `-`. + # For example, the pattern `*,-policy-a-\*` will return all snapshots except for those that were created by an SLM policy with a name starting with `policy-a-`. + # Note that the wildcard pattern `*` matches all snapshots created by an SLM policy but not those snapshots that were not created by an SLM policy. + # To include snapshots that were not created by an SLM policy, you can use the special pattern `_none` that will match all snapshots without an SLM policy. # @option arguments [String] :sort The sort order for the result. # The default behavior is sorting by snapshot start time stamp. Server default: start_time. - # @option arguments [Boolean] :verbose If +true+, returns additional information about each snapshot such as the version of Elasticsearch which took the snapshot, the start and end times of the snapshot, and the number of shards snapshotted.NOTE: The parameters +size+, +order+, +after+, +from_sort_value+, +offset+, +slm_policy_filter+, and +sort+ are not supported when you set +verbose=false+ and the sort order for requests with +verbose=false+ is undefined. Server default: true. + # @option arguments [Boolean] :verbose If `true`, returns additional information about each snapshot such as the version of Elasticsearch which took the snapshot, the start and end times of the snapshot, and the number of shards snapshotted.NOTE: The parameters `size`, `order`, `after`, `from_sort_value`, `offset`, `slm_policy_filter`, and `sort` are not supported when you set `verbose=false` and the sort order for requests with `verbose=false` is undefined. Server default: true. # @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-get diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/snapshot/get_repository.rb b/elasticsearch-api/lib/elasticsearch/api/actions/snapshot/get_repository.rb index cd28d14211..ddfe682cfc 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/snapshot/get_repository.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/snapshot/get_repository.rb @@ -25,12 +25,12 @@ module Actions # Get snapshot repository information. # # @option arguments [String, Array] :repository A comma-separated list of snapshot repository names used to limit the request. 
- # Wildcard (+*+) expressions are supported including combining wildcards with exclude patterns starting with +-+.To get information about all snapshot repositories registered in the cluster, omit this parameter or use +*+ or +_all+. - # @option arguments [Boolean] :local If +true+, the request gets information from the local node only. - # If +false+, the request gets information from the master node. + # Wildcard (`*`) expressions are supported including combining wildcards with exclude patterns starting with `-`.To get information about all snapshot repositories registered in the cluster, omit this parameter or use `*` or `_all`. + # @option arguments [Boolean] :local If `true`, the request gets information from the local node only. + # If `false`, the request gets information from the master node. # @option arguments [Time] :master_timeout The period to wait for the master node. # If the master node is not available before the timeout expires, the request fails and returns an error. - # To indicate that the request should never timeout, set it to +-1+. Server default: to 30s. + # To indicate that the request should never timeout, set it to `-1`. Server default: to 30s. # @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-get-repository diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/snapshot/repository_analyze.rb b/elasticsearch-api/lib/elasticsearch/api/actions/snapshot/repository_analyze.rb index 2f2a4e15a5..bfd3ba4441 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/snapshot/repository_analyze.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/snapshot/repository_analyze.rb @@ -30,8 +30,8 @@ module Actions # Some storage systems behave incorrectly, or perform poorly, especially when accessed concurrently by multiple clients as the nodes of an Elasticsearch cluster do. This API performs a collection of read and write operations on your repository which are designed to detect incorrect behaviour and to measure the performance characteristics of your storage system. # The default values for the parameters are deliberately low to reduce the impact of running an analysis inadvertently and to provide a sensible starting point for your investigations. # Run your first analysis with the default parameter values to check for simple problems. - # If successful, run a sequence of increasingly large analyses until you encounter a failure or you reach a +blob_count+ of at least +2000+, a +max_blob_size+ of at least +2gb+, a +max_total_data_size+ of at least +1tb+, and a +register_operation_count+ of at least +100+. - # Always specify a generous timeout, possibly +1h+ or longer, to allow time for each analysis to run to completion. + # If successful, run a sequence of increasingly large analyses until you encounter a failure or you reach a `blob_count` of at least `2000`, a `max_blob_size` of at least `2gb`, a `max_total_data_size` of at least `1tb`, and a `register_operation_count` of at least `100`. + # Always specify a generous timeout, possibly `1h` or longer, to allow time for each analysis to run to completion. # Perform the analyses using a multi-node cluster of a similar size to your production cluster so that it can detect any problems that only arise when the repository is accessed by many nodes at once. # If the analysis fails, Elasticsearch detected that your repository behaved unexpectedly. 
# This usually means you are using a third-party storage system with an incorrect or incompatible implementation of the API it claims to support. @@ -60,7 +60,7 @@ module Actions # IMPORTANT: An analysis writes a substantial amount of data to your repository and then reads it back again. # This consumes bandwidth on the network between the cluster and the repository, and storage space and I/O bandwidth on the repository itself. # You must ensure this load does not affect other users of these systems. - # Analyses respect the repository settings +max_snapshot_bytes_per_sec+ and +max_restore_bytes_per_sec+ if available and the cluster setting +indices.recovery.max_bytes_per_sec+ which you can use to limit the bandwidth they consume. + # Analyses respect the repository settings `max_snapshot_bytes_per_sec` and `max_restore_bytes_per_sec` if available and the cluster setting `indices.recovery.max_bytes_per_sec` which you can use to limit the bandwidth they consume. # NOTE: This API is intended for exploratory use by humans. You should expect the request parameters and the response format to vary in future versions. # NOTE: Different versions of Elasticsearch may perform different checks for repository compatibility, with newer versions typically being stricter than older ones. # A storage system that passes repository analysis with one version of Elasticsearch may fail with a different version. @@ -69,10 +69,10 @@ module Actions # NOTE: This API may not work correctly in a mixed-version cluster. # *Implementation details* # NOTE: This section of documentation describes how the repository analysis API works in this version of Elasticsearch, but you should expect the implementation to vary between versions. The request parameters and response format depend on details of the implementation so may also be different in newer versions. - # The analysis comprises a number of blob-level tasks, as set by the +blob_count+ parameter and a number of compare-and-exchange operations on linearizable registers, as set by the +register_operation_count+ parameter. + # The analysis comprises a number of blob-level tasks, as set by the `blob_count` parameter and a number of compare-and-exchange operations on linearizable registers, as set by the `register_operation_count` parameter. # These tasks are distributed over the data and master-eligible nodes in the cluster for execution. # For most blob-level tasks, the executing node first writes a blob to the repository and then instructs some of the other nodes in the cluster to attempt to read the data it just wrote. - # The size of the blob is chosen randomly, according to the +max_blob_size+ and +max_total_data_size+ parameters. + # The size of the blob is chosen randomly, according to the `max_blob_size` and `max_total_data_size` parameters. # If any of these reads fails then the repository does not implement the necessary read-after-write semantics that Elasticsearch requires. # For some blob-level tasks, the executing node will instruct some of its peers to attempt to read the data before the writing process completes. # These reads are permitted to fail, but must not return partial data. @@ -97,21 +97,21 @@ module Actions # # @option arguments [String] :repository The name of the repository. (*Required*) # @option arguments [Integer] :blob_count The total number of blobs to write to the repository during the test. - # For realistic experiments, you should set it to at least +2000+. Server default: 100. 
+ # For realistic experiments, you should set it to at least `2000`. Server default: 100. # @option arguments [Integer] :concurrency The number of operations to run concurrently during the test. Server default: 10. # @option arguments [Boolean] :detailed Indicates whether to return detailed results, including timing information for every operation performed during the analysis. # If false, it returns only a summary of the analysis. # @option arguments [Integer] :early_read_node_count The number of nodes on which to perform an early read operation while writing each blob. # Early read operations are only rarely performed. Server default: 2. # @option arguments [Integer, String] :max_blob_size The maximum size of a blob to be written during the test. - # For realistic experiments, you should set it to at least +2gb+. Server default: 10mb. + # For realistic experiments, you should set it to at least `2gb`. Server default: 10mb. # @option arguments [Integer, String] :max_total_data_size An upper limit on the total size of all the blobs written during the test. - # For realistic experiments, you should set it to at least +1tb+. Server default: 1gb. + # For realistic experiments, you should set it to at least `1tb`. Server default: 1gb. # @option arguments [Float] :rare_action_probability The probability of performing a rare action such as an early read, an overwrite, or an aborted write on each blob. Server default: 0.02. # @option arguments [Boolean] :rarely_abort_writes Indicates whether to rarely cancel writes before they complete. Server default: true. # @option arguments [Integer] :read_node_count The number of nodes on which to read a blob after writing. Server default: 10. # @option arguments [Integer] :register_operation_count The minimum number of linearizable register operations to perform in total. - # For realistic experiments, you should set it to at least +100+. Server default: 10. + # For realistic experiments, you should set it to at least `100`. Server default: 10. # @option arguments [Integer] :seed The seed for the pseudo-random number generator used to generate the list of operations performed during the test. # To repeat the same set of operations in multiple experiments, use the same seed in each experiment. # Note that the operations are performed concurrently so might not always happen in the same order on each run. diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/snapshot/restore.rb b/elasticsearch-api/lib/elasticsearch/api/actions/snapshot/restore.rb index f081373e41..30b3ba244e 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/snapshot/restore.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/snapshot/restore.rb @@ -29,9 +29,11 @@ module Actions # The snapshot and cluster versions must be compatible. # To restore a snapshot, the cluster's global metadata must be writable. Ensure there are't any cluster blocks that prevent writes. The restore operation ignores index blocks. # Before you restore a data stream, ensure the cluster contains a matching index template with data streams enabled. To check, use the index management feature in Kibana or the get index template API: - # + + # + # ``` # GET _index_template/*?filter_path=index_templates.name,index_templates.index_template.index_patterns,index_templates.index_template.data_stream - # + + # ``` + # # If no such template exists, you can create one or restore a cluster state that contains one. Without a matching index template, a data stream can't roll over or create backing indices. 
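A hedged Ruby sketch of the pre-restore template check and the restore call itself (the repository `my_backup_repo`, snapshot `snapshot_1`, and index pattern are illustrative assumptions):

```ruby
client = Elasticsearch::Client.new

# Equivalent of the GET _index_template request above: confirm a matching
# template with data streams enabled exists before restoring a data stream.
templates = client.indices.get_index_template(
  name: '*',
  filter_path: 'index_templates.name,index_templates.index_template.index_patterns,index_templates.index_template.data_stream'
)
puts templates

# Restore selected indices from the snapshot and wait for the operation to complete.
client.snapshot.restore(
  repository: 'my_backup_repo',
  snapshot: 'snapshot_1',
  wait_for_completion: true,
  body: { indices: 'my-index-*', include_global_state: false }
)
```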
# If your snapshot contains data from App Search or Workplace Search, you must restore the Enterprise Search encryption key before you restore the snapshot.
#
@@ -39,10 +41,10 @@ module Actions
# @option arguments [String] :snapshot The name of the snapshot to restore. (*Required*)
# @option arguments [Time] :master_timeout The period to wait for the master node.
# If the master node is not available before the timeout expires, the request fails and returns an error.
- # To indicate that the request should never timeout, set it to +-1+. Server default: 30s.
- # @option arguments [Boolean] :wait_for_completion If +true+, the request returns a response when the restore operation completes.
+ # To indicate that the request should never timeout, set it to `-1`. Server default: 30s.
+ # @option arguments [Boolean] :wait_for_completion If `true`, the request returns a response when the restore operation completes.
# The operation is complete when it finishes all attempts to recover primary shards for restored indices.
- # This applies even if one or more of the recovery attempts fail.If +false+, the request returns a response when the restore operation initializes.
+ # This applies even if one or more of the recovery attempts fail. If `false`, the request returns a response when the restore operation initializes.
# @option arguments [Hash] :headers Custom HTTP headers
# @option arguments [Hash] :body request body
#
diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/snapshot/status.rb b/elasticsearch-api/lib/elasticsearch/api/actions/snapshot/status.rb
index d270d55221..dc532b018b 100644
--- a/elasticsearch-api/lib/elasticsearch/api/actions/snapshot/status.rb
+++ b/elasticsearch-api/lib/elasticsearch/api/actions/snapshot/status.rb
@@ -26,9 +26,9 @@ module Actions
# Get a detailed description of the current state for each shard participating in the snapshot.
# Note that this API should be used only to obtain detailed shard-level information for ongoing snapshots.
# If this detail is not needed or you want to obtain information about one or more existing snapshots, use the get snapshot API.
- # If you omit the ++ request path parameter, the request retrieves information only for currently running snapshots.
+ # If you omit the `<snapshot>` request path parameter, the request retrieves information only for currently running snapshots.
# This usage is preferred.
- # If needed, you can specify ++ and ++ to retrieve information for specific snapshots, even if they're not currently running.
+ # If needed, you can specify `<repository>` and `<snapshot>` to retrieve information for specific snapshots, even if they're not currently running.
# WARNING: Using the API to return the status of any snapshots other than currently running snapshots can be expensive.
# The API requires a read from the repository for each shard in each snapshot.
# For example, if you have 100 snapshots with 1,000 shards each, an API request that includes all snapshots will require 100,000 reads (100 snapshots x 1,000 shards).
@@ -36,15 +36,15 @@ module Actions
# These requests can also tax machine resources and, when using cloud storage, incur high processing costs.
#
# @option arguments [String] :repository The snapshot repository name used to limit the request.
- # It supports wildcards (+*+) if ++ isn't specified.
+ # It supports wildcards (`*`) if `<snapshot>` isn't specified.
# @option arguments [String, Array] :snapshot A comma-separated list of snapshots to retrieve status for.
# The default is currently running snapshots.
- # Wildcards (+*+) are not supported. - # @option arguments [Boolean] :ignore_unavailable If +false+, the request returns an error for any snapshots that are unavailable. - # If +true+, the request ignores snapshots that are unavailable, such as those that are corrupted or temporarily cannot be returned. + # Wildcards (`*`) are not supported. + # @option arguments [Boolean] :ignore_unavailable If `false`, the request returns an error for any snapshots that are unavailable. + # If `true`, the request ignores snapshots that are unavailable, such as those that are corrupted or temporarily cannot be returned. # @option arguments [Time] :master_timeout The period to wait for the master node. # If the master node is not available before the timeout expires, the request fails and returns an error. - # To indicate that the request should never timeout, set it to +-1+. Server default: 30s. + # To indicate that the request should never timeout, set it to `-1`. Server default: 30s. # @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-status diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/snapshot/verify_repository.rb b/elasticsearch-api/lib/elasticsearch/api/actions/snapshot/verify_repository.rb index aea20ebcec..4bd0c9bf21 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/snapshot/verify_repository.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/snapshot/verify_repository.rb @@ -28,10 +28,10 @@ module Actions # @option arguments [String] :repository The name of the snapshot repository to verify. (*Required*) # @option arguments [Time] :master_timeout The period to wait for the master node. # If the master node is not available before the timeout expires, the request fails and returns an error. - # To indicate that the request should never timeout, set it to +-1+. Server default: 30s. + # To indicate that the request should never timeout, set it to `-1`. Server default: 30s. # @option arguments [Time] :timeout The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. # If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. - # To indicate that the request should never timeout, set it to +-1+. Server default: 30s. + # To indicate that the request should never timeout, set it to `-1`. Server default: 30s. # @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-verify-repository diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/snapshot_lifecycle_management/get_status.rb b/elasticsearch-api/lib/elasticsearch/api/actions/snapshot_lifecycle_management/get_status.rb index 335cf26ccf..e3ff0b3af8 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/snapshot_lifecycle_management/get_status.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/snapshot_lifecycle_management/get_status.rb @@ -26,10 +26,10 @@ module Actions # # @option arguments [Time] :master_timeout The period to wait for a connection to the master node. # If no response is received before the timeout expires, the request fails and returns an error. - # To indicate that the request should never timeout, set it to +-1+. Server default: 30s. + # To indicate that the request should never timeout, set it to `-1`. Server default: 30s. 
# @option arguments [Time] :timeout The period to wait for a response. # If no response is received before the timeout expires, the request fails and returns an error. - # To indicate that the request should never timeout, set it to +-1+. Server default: 30s. + # To indicate that the request should never timeout, set it to `-1`. Server default: 30s. # @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-get-status diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/snapshot_lifecycle_management/put_lifecycle.rb b/elasticsearch-api/lib/elasticsearch/api/actions/snapshot_lifecycle_management/put_lifecycle.rb index e0158652bb..2e6e4f71ff 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/snapshot_lifecycle_management/put_lifecycle.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/snapshot_lifecycle_management/put_lifecycle.rb @@ -30,10 +30,10 @@ module Actions # @option arguments [String] :policy_id The identifier for the snapshot lifecycle policy you want to create or update. (*Required*) # @option arguments [Time] :master_timeout The period to wait for a connection to the master node. # If no response is received before the timeout expires, the request fails and returns an error. - # To indicate that the request should never timeout, set it to +-1+. Server default: 30s. + # To indicate that the request should never timeout, set it to `-1`. Server default: 30s. # @option arguments [Time] :timeout The period to wait for a response. # If no response is received before the timeout expires, the request fails and returns an error. - # To indicate that the request should never timeout, set it to +-1+. Server default: 30s. + # To indicate that the request should never timeout, set it to `-1`. Server default: 30s. # @option arguments [Hash] :headers Custom HTTP headers # @option arguments [Hash] :body request body # diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/snapshot_lifecycle_management/start.rb b/elasticsearch-api/lib/elasticsearch/api/actions/snapshot_lifecycle_management/start.rb index 6eacac6a51..8ac4128b7a 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/snapshot_lifecycle_management/start.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/snapshot_lifecycle_management/start.rb @@ -28,10 +28,10 @@ module Actions # # @option arguments [Time] :master_timeout The period to wait for a connection to the master node. # If no response is received before the timeout expires, the request fails and returns an error. - # To indicate that the request should never timeout, set it to +-1+. Server default: 30s. + # To indicate that the request should never timeout, set it to `-1`. Server default: 30s. # @option arguments [Time] :timeout The period to wait for a response. # If no response is received before the timeout expires, the request fails and returns an error. - # To indicate that the request should never timeout, set it to +-1+. Server default: 30s. + # To indicate that the request should never timeout, set it to `-1`. Server default: 30s. 
# @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-start diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/snapshot_lifecycle_management/stop.rb b/elasticsearch-api/lib/elasticsearch/api/actions/snapshot_lifecycle_management/stop.rb index ada3fd17e9..04f6065046 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/snapshot_lifecycle_management/stop.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/snapshot_lifecycle_management/stop.rb @@ -32,10 +32,10 @@ module Actions # # @option arguments [Time] :master_timeout The period to wait for a connection to the master node. # If no response is received before the timeout expires, the request fails and returns an error. - # To indicate that the request should never timeout, set it to +-1+. Server default: 30s. + # To indicate that the request should never timeout, set it to `-1`. Server default: 30s. # @option arguments [Time] :timeout The period to wait for a response. # If no response is received before the timeout expires, the request fails and returns an error. - # To indicate that the request should never timeout, set it to +-1+. Server default: 30s. + # To indicate that the request should never timeout, set it to `-1`. Server default: 30s. # @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-stop diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/sql/delete_async.rb b/elasticsearch-api/lib/elasticsearch/api/actions/sql/delete_async.rb index d55dedd5a8..e236471d57 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/sql/delete_async.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/sql/delete_async.rb @@ -26,7 +26,7 @@ module Actions # Delete an async SQL search or a stored synchronous SQL search. # If the search is still running, the API cancels it. # If the Elasticsearch security features are enabled, only the following users can use this API to delete a search: - # * Users with the +cancel_task+ cluster privilege. + # * Users with the `cancel_task` cluster privilege. # * The user who first submitted the search. # # @option arguments [String] :id The identifier for the search. (*Required*) diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/sql/get_async.rb b/elasticsearch-api/lib/elasticsearch/api/actions/sql/get_async.rb index be93a52c91..958a311a67 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/sql/get_async.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/sql/get_async.rb @@ -30,10 +30,10 @@ module Actions # @option arguments [String] :delimiter The separator for CSV results. # The API supports this parameter only for CSV responses. Server default: ,. # @option arguments [String] :format The format for the response. - # You must specify a format using this parameter or the +Accept+ HTTP header. + # You must specify a format using this parameter or the `Accept` HTTP header. # If you specify both, the API uses this parameter. # @option arguments [Time] :keep_alive The retention period for the search and its results. - # It defaults to the +keep_alive+ period for the original SQL search. + # It defaults to the `keep_alive` period for the original SQL search. # @option arguments [Time] :wait_for_completion_timeout The period to wait for complete results. # It defaults to no timeout, meaning the request waits for complete search results. 
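To tie the async SQL parameters together, a small Ruby sketch (the index name and timings are assumptions; the `id` field is only present when the search keeps running in the background):

```ruby
# Submit an SQL search that may continue asynchronously.
response = client.sql.query(
  format: 'json',
  body: {
    query: 'SELECT * FROM "my-index-000001" ORDER BY "@timestamp" DESC',
    wait_for_completion_timeout: '2s',
    keep_alive: '5m'
  }
)

if response['is_running']
  # Poll the stored search by its identifier.
  client.sql.get_async(id: response['id'], wait_for_completion_timeout: '30s')
  # Delete it once the results are no longer needed.
  client.sql.delete_async(id: response['id'])
end
```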
# @option arguments [Hash] :headers Custom HTTP headers
diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/sql/query.rb b/elasticsearch-api/lib/elasticsearch/api/actions/sql/query.rb
index a7c0c200e5..faf92e71f9 100644
--- a/elasticsearch-api/lib/elasticsearch/api/actions/sql/query.rb
+++ b/elasticsearch-api/lib/elasticsearch/api/actions/sql/query.rb
@@ -26,8 +26,8 @@ module Actions
# Run an SQL request.
#
# @option arguments [String] :format The format for the response.
- # You can also specify a format using the +Accept+ HTTP header.
- # If you specify both this parameter and the +Accept+ HTTP header, this parameter takes precedence.
+ # You can also specify a format using the `Accept` HTTP header.
+ # If you specify both this parameter and the `Accept` HTTP header, this parameter takes precedence.
# @option arguments [Hash] :headers Custom HTTP headers
# @option arguments [Hash] :body request body
#
diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/sql/translate.rb b/elasticsearch-api/lib/elasticsearch/api/actions/sql/translate.rb
index 14e8e89f4c..8256a6cdb2 100644
--- a/elasticsearch-api/lib/elasticsearch/api/actions/sql/translate.rb
+++ b/elasticsearch-api/lib/elasticsearch/api/actions/sql/translate.rb
@@ -24,7 +24,7 @@ module SQL
module Actions
# Translate SQL into Elasticsearch queries.
# Translate an SQL search into a search API request containing Query DSL.
- # It accepts the same request body parameters as the SQL search API, excluding +cursor+.
+ # It accepts the same request body parameters as the SQL search API, excluding `cursor`.
#
# @option arguments [Hash] :headers Custom HTTP headers
# @option arguments [Hash] :body request body
diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/ssl/certificates.rb b/elasticsearch-api/lib/elasticsearch/api/actions/ssl/certificates.rb
index 5d6f2882a2..3d19ce8dd2 100644
--- a/elasticsearch-api/lib/elasticsearch/api/actions/ssl/certificates.rb
+++ b/elasticsearch-api/lib/elasticsearch/api/actions/ssl/certificates.rb
@@ -28,8 +28,8 @@ module Actions
# - Settings for transport and HTTP interfaces
# - TLS settings that are used within authentication realms
# - TLS settings for remote monitoring exporters
- # The list includes certificates that are used for configuring trust, such as those configured in the +xpack.security.transport.ssl.truststore+ and +xpack.security.transport.ssl.certificate_authorities+ settings.
- # It also includes certificates that are used for configuring server identity, such as +xpack.security.http.ssl.keystore+ and +xpack.security.http.ssl.certificate settings+.
+ # The list includes certificates that are used for configuring trust, such as those configured in the `xpack.security.transport.ssl.truststore` and `xpack.security.transport.ssl.certificate_authorities` settings.
+ # It also includes certificates that are used for configuring server identity, such as `xpack.security.http.ssl.keystore` and `xpack.security.http.ssl.certificate` settings.
# The list does not include certificates that are sourced from the default SSL context of the Java Runtime Environment (JRE), even if those certificates are in use within Elasticsearch.
# NOTE: When a PKCS#11 token is configured as the truststore of the JRE, the API returns all the certificates that are included in the PKCS#11 token irrespective of whether these are used in the Elasticsearch TLS configuration.
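For example, a speculative Ruby sketch that lists the configured certificates and flags those close to expiry (the 30-day threshold is arbitrary):

```ruby
require 'time'

# Each entry includes fields such as path, alias, and expiry.
client.ssl.certificates.body.each do |cert|
  expiry = Time.parse(cert['expiry'])
  if expiry < Time.now + (30 * 24 * 60 * 60)
    warn "#{cert['path']} (alias: #{cert['alias']}) expires at #{cert['expiry']}"
  end
end
```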
# If Elasticsearch is configured to use a keystore or truststore, the API output includes all certificates in that store, even though some of the certificates might not be in active use within the cluster. diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/synonyms/delete_synonym_rule.rb b/elasticsearch-api/lib/elasticsearch/api/actions/synonyms/delete_synonym_rule.rb index 0e057bcd21..d7c622b11d 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/synonyms/delete_synonym_rule.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/synonyms/delete_synonym_rule.rb @@ -27,8 +27,8 @@ module Actions # # @option arguments [String] :set_id The ID of the synonym set to update. (*Required*) # @option arguments [String] :rule_id The ID of the synonym rule to delete. (*Required*) - # @option arguments [Boolean] :refresh If +true+, the request will refresh the analyzers with the deleted synonym rule and wait for the new synonyms to be available before returning. - # If +false+, analyzers will not be reloaded with the deleted synonym rule Server default: true. + # @option arguments [Boolean] :refresh If `true`, the request will refresh the analyzers with the deleted synonym rule and wait for the new synonyms to be available before returning. + # If `false`, analyzers will not be reloaded with the deleted synonym rule Server default: true. # @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-delete-synonym-rule diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/synonyms/put_synonym.rb b/elasticsearch-api/lib/elasticsearch/api/actions/synonyms/put_synonym.rb index d4b7537ac6..1fa376f0cc 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/synonyms/put_synonym.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/synonyms/put_synonym.rb @@ -29,8 +29,8 @@ module Actions # This is equivalent to invoking the reload search analyzers API for all indices that use the synonyms set. # # @option arguments [String] :id The ID of the synonyms set to be created or updated. (*Required*) - # @option arguments [Boolean] :refresh If +true+, the request will refresh the analyzers with the new synonyms set and wait for the new synonyms to be available before returning. - # If +false+, analyzers will not be reloaded with the new synonym set Server default: true. + # @option arguments [Boolean] :refresh If `true`, the request will refresh the analyzers with the new synonyms set and wait for the new synonyms to be available before returning. + # If `false`, analyzers will not be reloaded with the new synonym set Server default: true. # @option arguments [Hash] :headers Custom HTTP headers # @option arguments [Hash] :body request body # diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/synonyms/put_synonym_rule.rb b/elasticsearch-api/lib/elasticsearch/api/actions/synonyms/put_synonym_rule.rb index ed1a8ec671..b036cdc097 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/synonyms/put_synonym_rule.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/synonyms/put_synonym_rule.rb @@ -29,8 +29,8 @@ module Actions # # @option arguments [String] :set_id The ID of the synonym set. (*Required*) # @option arguments [String] :rule_id The ID of the synonym rule to be updated or created. (*Required*) - # @option arguments [Boolean] :refresh If +true+, the request will refresh the analyzers with the new synonym rule and wait for the new synonyms to be available before returning. 
- # If +false+, analyzers will not be reloaded with the new synonym rule Server default: true. + # @option arguments [Boolean] :refresh If `true`, the request will refresh the analyzers with the new synonym rule and wait for the new synonyms to be available before returning. + # If `false`, analyzers will not be reloaded with the new synonym rule Server default: true. # @option arguments [Hash] :headers Custom HTTP headers # @option arguments [Hash] :body request body # diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/tasks/cancel.rb b/elasticsearch-api/lib/elasticsearch/api/actions/tasks/cancel.rb index 3f6f0ccf2d..7ca062595c 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/tasks/cancel.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/tasks/cancel.rb @@ -29,7 +29,7 @@ module Actions # It is also possible that Elasticsearch must complete its work on other tasks before it can process the cancellation. # The get task information API will continue to list these cancelled tasks until they complete. # The cancelled flag in the response indicates that the cancellation command has been processed and the task will stop as soon as possible. - # To troubleshoot why a cancelled task does not complete promptly, use the get task information API with the +?detailed+ parameter to identify the other tasks the system is running. + # To troubleshoot why a cancelled task does not complete promptly, use the get task information API with the `?detailed` parameter to identify the other tasks the system is running. # You can also use the node hot threads API to obtain detailed information about the work the system is doing instead of completing the cancelled task. # This functionality is Experimental and may be changed or removed # completely in a future release. Elastic will take a best effort approach diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/tasks/get.rb b/elasticsearch-api/lib/elasticsearch/api/actions/tasks/get.rb index 3ca9ae65d5..caae6af864 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/tasks/get.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/tasks/get.rb @@ -35,7 +35,7 @@ module Actions # @option arguments [String] :task_id The task identifier. (*Required*) # @option arguments [Time] :timeout The period to wait for a response. # If no response is received before the timeout expires, the request fails and returns an error. Server default: 30s. - # @option arguments [Boolean] :wait_for_completion If +true+, the request blocks until the task has completed. + # @option arguments [Boolean] :wait_for_completion If `true`, the request blocks until the task has completed. # @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-tasks diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/tasks/list.rb b/elasticsearch-api/lib/elasticsearch/api/actions/tasks/list.rb index 317642ee95..2c09c19850 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/tasks/list.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/tasks/list.rb @@ -27,14 +27,17 @@ module Actions # WARNING: The task management API is new and should still be considered a beta feature. # The API may change in ways that are not backwards compatible. # **Identifying running tasks** - # The +X-Opaque-Id header+, when provided on the HTTP request header, is going to be returned as a header in the response as well as in the headers field for in the task information. 
+ # The `X-Opaque-Id` header, when provided on the HTTP request, is returned as a header in the response as well as in the `headers` field of the task information.
# This enables you to track certain calls or associate certain tasks with the client that started them.
# For example:
- # +
+ #
+ # ```
# curl -i -H "X-Opaque-Id: 123456" "http://localhost:9200/_tasks?group_by=parents"
- # +
+ # ```
+ #
# The API returns the following result:
- # +
+ #
+ # ```
# HTTP/1.1 200 OK
# X-Opaque-Id: 123456
# content-type: application/json; charset=UTF-8
@@ -70,29 +73,30 @@ module Actions
# }
# }
# }
- # +
- # In this example, +X-Opaque-Id: 123456+ is the ID as a part of the response header.
- # The +X-Opaque-Id+ in the task +headers+ is the ID for the task that was initiated by the REST request.
- # The +X-Opaque-Id+ in the children +headers+ is the child task of the task that was initiated by the REST request.
+ # ```
+ #
+ # In this example, `X-Opaque-Id: 123456` is the ID as a part of the response header.
+ # The `X-Opaque-Id` in the task `headers` is the ID for the task that was initiated by the REST request.
+ # The `X-Opaque-Id` in the children `headers` is the child task of the task that was initiated by the REST request.
# This functionality is Experimental and may be changed or removed
# completely in a future release. Elastic will take a best effort approach
# to fix any issues, but experimental features are not subject to the
# support SLA of official GA features.
#
# @option arguments [String] :actions A comma-separated list or wildcard expression of actions used to limit the request.
- # For example, you can use +cluser:*+ to retrieve all cluster-related tasks.
- # @option arguments [Boolean] :detailed If +true+, the response includes detailed information about the running tasks.
+ # For example, you can use `cluster:*` to retrieve all cluster-related tasks.
+ # @option arguments [Boolean] :detailed If `true`, the response includes detailed information about the running tasks.
# This information is useful to distinguish tasks from each other but is more costly to run.
# @option arguments [String] :group_by A key that is used to group tasks in the response.
# The task lists can be grouped either by nodes or by parent tasks.
# @option arguments [String, Array] :nodes A comma-separated list of node IDs or names that is used to limit the returned information.
# @option arguments [String] :parent_task_id A parent task identifier that is used to limit returned information.
- # To return all tasks, omit this parameter or use a value of +-1+.
+ # To return all tasks, omit this parameter or use a value of `-1`.
# If the parent task is not found, the API does not return a 404 response code.
# @option arguments [Time] :timeout The period to wait for each node to respond.
# If a node does not respond before its timeout expires, the response does not include its information.
- # However, timed out nodes are included in the +node_failures+ property. Server default: 30s.
- # @option arguments [Boolean] :wait_for_completion If +true+, the request blocks until the operation is complete.
+ # However, timed out nodes are included in the `node_failures` property. Server default: 30s.
+ # @option arguments [Boolean] :wait_for_completion If `true`, the request blocks until the operation is complete.
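A Ruby equivalent of the curl example above, passing the opaque ID as a per-request header (the ID value `123456` is arbitrary):

```ruby
response = client.tasks.list(
  group_by: 'parents',
  headers: { 'X-Opaque-Id' => '123456' }
)

# The same ID comes back on the HTTP response and in the headers of the tasks it started.
puts response.headers['x-opaque-id']
response['tasks'].each do |_task_id, task|
  puts task.dig('headers', 'X-Opaque-Id')
end
```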
# @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-tasks diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/terms_enum.rb b/elasticsearch-api/lib/elasticsearch/api/actions/terms_enum.rb index ce09a037b1..a1116f72f7 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/terms_enum.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/terms_enum.rb @@ -26,8 +26,8 @@ module Actions # This API is designed for low-latency look-ups used in auto-complete scenarios. # # @option arguments [String] :index A comma-separated list of data streams, indices, and index aliases to search. - # Wildcard (+*+) expressions are supported. - # To search all data streams or indices, omit this parameter or use +*+ or +_all+. (*Required*) + # Wildcard (`*`) expressions are supported. + # To search all data streams or indices, omit this parameter or use `*` or `_all`. (*Required*) # @option arguments [Hash] :headers Custom HTTP headers # @option arguments [Hash] :body request body # diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/termvectors.rb b/elasticsearch-api/lib/elasticsearch/api/actions/termvectors.rb index eacb145c24..2c4a111716 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/termvectors.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/termvectors.rb @@ -24,44 +24,46 @@ module Actions # Get term vector information. # Get information and statistics about terms in the fields of a particular document. # You can retrieve term vectors for documents stored in the index or for artificial documents passed in the body of the request. - # You can specify the fields you are interested in through the +fields+ parameter or by adding the fields to the request body. + # You can specify the fields you are interested in through the `fields` parameter or by adding the fields to the request body. # For example: - # + + # + # ``` # GET /my-index-000001/_termvectors/1?fields=message - # + + # ``` + # # Fields can be specified using wildcards, similar to the multi match query. # Term vectors are real-time by default, not near real-time. - # This can be changed by setting +realtime+ parameter to +false+. + # This can be changed by setting `realtime` parameter to `false`. # You can request three types of values: _term information_, _term statistics_, and _field statistics_. # By default, all term information and field statistics are returned for all fields but term statistics are excluded. # **Term information** # * term frequency in the field (always returned) - # * term positions (+positions: true+) - # * start and end offsets (+offsets: true+) - # * term payloads (+payloads: true+), as base64 encoded bytes + # * term positions (`positions: true`) + # * start and end offsets (`offsets: true`) + # * term payloads (`payloads: true`), as base64 encoded bytes # If the requested information wasn't stored in the index, it will be computed on the fly if possible. # Additionally, term vectors could be computed for documents not even existing in the index, but instead provided by the user. # # @option arguments [String] :index The name of the index that contains the document. (*Required*) # @option arguments [String] :id A unique identifier for the document. # @option arguments [String, Array] :fields A comma-separated list or wildcard expressions of fields to include in the statistics. 
- # It is used as the default list unless a specific field list is provided in the +completion_fields+ or +fielddata_fields+ parameters. - # @option arguments [Boolean] :field_statistics If +true+, the response includes: + # It is used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters. + # @option arguments [Boolean] :field_statistics If `true`, the response includes: # - The document count (how many documents contain this field). # - The sum of document frequencies (the sum of document frequencies for all terms in this field). # - The sum of total term frequencies (the sum of total term frequencies of each term in this field). Server default: true. - # @option arguments [Boolean] :offsets If +true+, the response includes term offsets. Server default: true. - # @option arguments [Boolean] :payloads If +true+, the response includes term payloads. Server default: true. - # @option arguments [Boolean] :positions If +true+, the response includes term positions. Server default: true. + # @option arguments [Boolean] :offsets If `true`, the response includes term offsets. Server default: true. + # @option arguments [Boolean] :payloads If `true`, the response includes term payloads. Server default: true. + # @option arguments [Boolean] :positions If `true`, the response includes term positions. Server default: true. # @option arguments [String] :preference The node or shard the operation should be performed on. # It is random by default. # @option arguments [Boolean] :realtime If true, the request is real-time as opposed to near-real-time. Server default: true. # @option arguments [String] :routing A custom value that is used to route operations to a specific shard. - # @option arguments [Boolean] :term_statistics If +true+, the response includes: + # @option arguments [Boolean] :term_statistics If `true`, the response includes: # - The total term frequency (how often a term occurs in all documents). # - The document frequency (the number of documents containing the current term). # By default these values are not returned since term statistics can have a serious performance impact. - # @option arguments [Integer] :version If +true+, returns the document version as part of a hit. + # @option arguments [Integer] :version If `true`, returns the document version as part of a hit. # @option arguments [String] :version_type The version type. # @option arguments [Hash] :headers Custom HTTP headers # @option arguments [Hash] :body request body diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/text_structure/find_field_structure.rb b/elasticsearch-api/lib/elasticsearch/api/actions/text_structure/find_field_structure.rb index 2cef2c2986..d5705dcb8e 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/text_structure/find_field_structure.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/text_structure/find_field_structure.rb @@ -25,7 +25,7 @@ module Actions # Find the structure of a text field. # Find the structure of a text field in an Elasticsearch index. # This API provides a starting point for extracting further information from log messages already ingested into Elasticsearch. - # For example, if you have ingested data into a very simple index that has just +@timestamp+ and message fields, you can use this API to see what common structure exists in the message field. 
+ # For example, if you have ingested data into a very simple index that has just `@timestamp` and message fields, you can use this API to see what common structure exists in the message field. # The response from the API contains: # * Sample messages. # * Statistics that reveal the most common values for all fields detected within the text and basic numeric statistics for numeric fields. @@ -33,73 +33,73 @@ module Actions # * Appropriate mappings for an Elasticsearch index, which you could use to ingest the text. # All this information can be calculated by the structure finder with no guidance. # However, you can optionally override some of the decisions about the text structure by specifying one or more query parameters. - # If the structure finder produces unexpected results, specify the +explain+ query parameter and an explanation will appear in the response. + # If the structure finder produces unexpected results, specify the `explain` query parameter and an explanation will appear in the response. # It helps determine why the returned structure was chosen. # - # @option arguments [String] :column_names If +format+ is set to +delimited+, you can specify the column names in a comma-separated list. + # @option arguments [String] :column_names If `format` is set to `delimited`, you can specify the column names in a comma-separated list. # If this parameter is not specified, the structure finder uses the column names from the header row of the text. # If the text does not have a header row, columns are named "column1", "column2", "column3", for example. - # @option arguments [String] :delimiter If you have set +format+ to +delimited+, you can specify the character used to delimit the values in each row. + # @option arguments [String] :delimiter If you have set `format` to `delimited`, you can specify the character used to delimit the values in each row. # Only a single character is supported; the delimiter cannot have multiple characters. - # By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (+|+). + # By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (`|`). # In this default scenario, all rows must have the same number of fields for the delimited format to be detected. # If you specify a delimiter, up to 10% of the rows can have a different number of columns than the first row. # @option arguments [Integer] :documents_to_sample The number of documents to include in the structural analysis. # The minimum value is 2. Server default: 1000. # @option arguments [String] :ecs_compatibility The mode of compatibility with ECS compliant Grok patterns. # Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern. - # This setting primarily has an impact when a whole message Grok pattern such as +%{CATALINALOG}+ matches the input. - # If the structure finder identifies a common structure but has no idea of the meaning then generic field names such as +path+, +ipaddress+, +field1+, and +field2+ are used in the +grok_pattern+ output. + # This setting primarily has an impact when a whole message Grok pattern such as `%{CATALINALOG}` matches the input. + # If the structure finder identifies a common structure but has no idea of the meaning then generic field names such as `path`, `ipaddress`, `field1`, and `field2` are used in the `grok_pattern` output. 
# The intention in that situation is that a user who knows the meanings will rename the fields before using them. Server default: disabled. - # @option arguments [Boolean] :explain If +true+, the response includes a field named +explanation+, which is an array of strings that indicate how the structure finder produced its result. + # @option arguments [Boolean] :explain If `true`, the response includes a field named `explanation`, which is an array of strings that indicate how the structure finder produced its result. # @option arguments [String] :field The field that should be analyzed. (*Required*) # @option arguments [String] :format The high level structure of the text. # By default, the API chooses the format. # In this default scenario, all rows must have the same number of fields for a delimited format to be detected. # If the format is set to delimited and the delimiter is not set, however, the API tolerates up to 5% of rows that have a different number of columns than the first row. - # @option arguments [String] :grok_pattern If the format is +semi_structured_text+, you can specify a Grok pattern that is used to extract fields from every message in the text. - # The name of the timestamp field in the Grok pattern must match what is specified in the +timestamp_field+ parameter. + # @option arguments [String] :grok_pattern If the format is `semi_structured_text`, you can specify a Grok pattern that is used to extract fields from every message in the text. + # The name of the timestamp field in the Grok pattern must match what is specified in the `timestamp_field` parameter. # If that parameter is not specified, the name of the timestamp field in the Grok pattern must match "timestamp". - # If +grok_pattern+ is not specified, the structure finder creates a Grok pattern. + # If `grok_pattern` is not specified, the structure finder creates a Grok pattern. # @option arguments [String] :index The name of the index that contains the analyzed field. (*Required*) - # @option arguments [String] :quote If the format is +delimited+, you can specify the character used to quote the values in each row if they contain newlines or the delimiter character. + # @option arguments [String] :quote If the format is `delimited`, you can specify the character used to quote the values in each row if they contain newlines or the delimiter character. # Only a single character is supported. - # If this parameter is not specified, the default value is a double quote (+"+). + # If this parameter is not specified, the default value is a double quote (`"`). # If your delimited text format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample. - # @option arguments [Boolean] :should_trim_fields If the format is +delimited+, you can specify whether values between delimiters should have whitespace trimmed from them. - # If this parameter is not specified and the delimiter is pipe (+|+), the default value is true. - # Otherwise, the default value is +false+. + # @option arguments [Boolean] :should_trim_fields If the format is `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them. + # If this parameter is not specified and the delimiter is pipe (`|`), the default value is true. + # Otherwise, the default value is `false`. # @option arguments [Time] :timeout The maximum amount of time that the structure analysis can take. # If the analysis is still running when the timeout expires, it will be stopped. 
Server default: 25s. # @option arguments [String] :timestamp_field The name of the field that contains the primary timestamp of each record in the text. - # In particular, if the text was ingested into an index, this is the field that would be used to populate the +@timestamp+ field.If the format is +semi_structured_text+, this field must match the name of the appropriate extraction in the +grok_pattern+. - # Therefore, for semi-structured text, it is best not to specify this parameter unless +grok_pattern+ is also specified.For structured text, if you specify this parameter, the field must exist within the text.If this parameter is not specified, the structure finder makes a decision about which field (if any) is the primary timestamp field. + # In particular, if the text was ingested into an index, this is the field that would be used to populate the `@timestamp` field.If the format is `semi_structured_text`, this field must match the name of the appropriate extraction in the `grok_pattern`. + # Therefore, for semi-structured text, it is best not to specify this parameter unless `grok_pattern` is also specified.For structured text, if you specify this parameter, the field must exist within the text.If this parameter is not specified, the structure finder makes a decision about which field (if any) is the primary timestamp field. # For structured text, it is not compulsory to have a timestamp in the text. # @option arguments [String] :timestamp_format The Java time format of the timestamp field in the text. # Only a subset of Java time format letter groups are supported: - # - +a+ - # - +d+ - # - +dd+ - # - +EEE+ - # - +EEEE+ - # - +H+ - # - +HH+ - # - +h+ - # - +M+ - # - +MM+ - # - +MMM+ - # - +MMMM+ - # - +mm+ - # - +ss+ - # - +XX+ - # - +XXX+ - # - +yy+ - # - +yyyy+ - # - +zzz+ - # Additionally +S+ letter groups (fractional seconds) of length one to nine are supported providing they occur after +ss+ and are separated from the +ss+ by a period (+.+), comma (+,+), or colon (+:+). - # Spacing and punctuation is also permitted with the exception a question mark (+?+), newline, and carriage return, together with literal text enclosed in single quotes. - # For example, +MM/dd HH.mm.ss,SSSSSS 'in' yyyy+ is a valid override format.One valuable use case for this parameter is when the format is semi-structured text, there are multiple timestamp formats in the text, and you know which format corresponds to the primary timestamp, but you do not want to specify the full +grok_pattern+. - # Another is when the timestamp format is one that the structure finder does not consider by default.If this parameter is not specified, the structure finder chooses the best format from a built-in set.If the special value +null+ is specified, the structure finder will not look for a primary timestamp in the text. + # - `a` + # - `d` + # - `dd` + # - `EEE` + # - `EEEE` + # - `H` + # - `HH` + # - `h` + # - `M` + # - `MM` + # - `MMM` + # - `MMMM` + # - `mm` + # - `ss` + # - `XX` + # - `XXX` + # - `yy` + # - `yyyy` + # - `zzz` + # Additionally `S` letter groups (fractional seconds) of length one to nine are supported providing they occur after `ss` and are separated from the `ss` by a period (`.`), comma (`,`), or colon (`:`). + # Spacing and punctuation is also permitted with the exception a question mark (`?`), newline, and carriage return, together with literal text enclosed in single quotes. 
+ # For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override format. One valuable use case for this parameter is when the format is semi-structured text, there are multiple timestamp formats in the text, and you know which format corresponds to the primary timestamp, but you do not want to specify the full `grok_pattern`.
+ # Another is when the timestamp format is one that the structure finder does not consider by default. If this parameter is not specified, the structure finder chooses the best format from a built-in set. If the special value `null` is specified, the structure finder will not look for a primary timestamp in the text.
# When the format is semi-structured text, this will result in the structure finder treating the text as single-line messages.
# @option arguments [Hash] :headers Custom HTTP headers
#
diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/text_structure/find_message_structure.rb b/elasticsearch-api/lib/elasticsearch/api/actions/text_structure/find_message_structure.rb
index 6fc99b4d65..15c68c7523 100644
--- a/elasticsearch-api/lib/elasticsearch/api/actions/text_structure/find_message_structure.rb
+++ b/elasticsearch-api/lib/elasticsearch/api/actions/text_structure/find_message_structure.rb
@@ -34,68 +34,68 @@ module Actions
# Appropriate mappings for an Elasticsearch index, which you could use to ingest the text.
# All this information can be calculated by the structure finder with no guidance.
# However, you can optionally override some of the decisions about the text structure by specifying one or more query parameters.
- # If the structure finder produces unexpected results, specify the +explain+ query parameter and an explanation will appear in the response.
+ # If the structure finder produces unexpected results, specify the `explain` query parameter and an explanation will appear in the response.
# It helps determine why the returned structure was chosen.
#
- # @option arguments [String] :column_names If the format is +delimited+, you can specify the column names in a comma-separated list.
+ # @option arguments [String] :column_names If the format is `delimited`, you can specify the column names in a comma-separated list.
# If this parameter is not specified, the structure finder uses the column names from the header row of the text.
# If the text does not have a header role, columns are named "column1", "column2", "column3", for example.
- # @option arguments [String] :delimiter If you the format is +delimited+, you can specify the character used to delimit the values in each row.
+ # @option arguments [String] :delimiter If the format is `delimited`, you can specify the character used to delimit the values in each row.
# Only a single character is supported; the delimiter cannot have multiple characters.
- # By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (+|+).
+ # By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (`|`).
# In this default scenario, all rows must have the same number of fields for the delimited format to be detected.
# If you specify a delimiter, up to 10% of the rows can have a different number of columns than the first row.
# @option arguments [String] :ecs_compatibility The mode of compatibility with ECS compliant Grok patterns.
# Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern.
- # This setting primarily has an impact when a whole message Grok pattern such as +%{CATALINALOG}+ matches the input. - # If the structure finder identifies a common structure but has no idea of meaning then generic field names such as +path+, +ipaddress+, +field1+, and +field2+ are used in the +grok_pattern+ output, with the intention that a user who knows the meanings rename these fields before using it. Server default: disabled. - # @option arguments [Boolean] :explain If this parameter is set to true, the response includes a field named +explanation+, which is an array of strings that indicate how the structure finder produced its result. + # This setting primarily has an impact when a whole message Grok pattern such as `%{CATALINALOG}` matches the input. + # If the structure finder identifies a common structure but has no idea of meaning then generic field names such as `path`, `ipaddress`, `field1`, and `field2` are used in the `grok_pattern` output, with the intention that a user who knows the meanings rename these fields before using it. Server default: disabled. + # @option arguments [Boolean] :explain If this parameter is set to true, the response includes a field named `explanation`, which is an array of strings that indicate how the structure finder produced its result. # @option arguments [String] :format The high level structure of the text. # By default, the API chooses the format. # In this default scenario, all rows must have the same number of fields for a delimited format to be detected. - # If the format is +delimited+ and the delimiter is not set, however, the API tolerates up to 5% of rows that have a different number of columns than the first row. - # @option arguments [String] :grok_pattern If the format is +semi_structured_text+, you can specify a Grok pattern that is used to extract fields from every message in the text. - # The name of the timestamp field in the Grok pattern must match what is specified in the +timestamp_field+ parameter. + # If the format is `delimited` and the delimiter is not set, however, the API tolerates up to 5% of rows that have a different number of columns than the first row. + # @option arguments [String] :grok_pattern If the format is `semi_structured_text`, you can specify a Grok pattern that is used to extract fields from every message in the text. + # The name of the timestamp field in the Grok pattern must match what is specified in the `timestamp_field` parameter. # If that parameter is not specified, the name of the timestamp field in the Grok pattern must match "timestamp". - # If +grok_pattern+ is not specified, the structure finder creates a Grok pattern. - # @option arguments [String] :quote If the format is +delimited+, you can specify the character used to quote the values in each row if they contain newlines or the delimiter character. + # If `grok_pattern` is not specified, the structure finder creates a Grok pattern. + # @option arguments [String] :quote If the format is `delimited`, you can specify the character used to quote the values in each row if they contain newlines or the delimiter character. # Only a single character is supported. - # If this parameter is not specified, the default value is a double quote (+"+). + # If this parameter is not specified, the default value is a double quote (`"`). # If your delimited text format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample. 
- # @option arguments [Boolean] :should_trim_fields If the format is +delimited+, you can specify whether values between delimiters should have whitespace trimmed from them. - # If this parameter is not specified and the delimiter is pipe (+|+), the default value is true. - # Otherwise, the default value is +false+. + # @option arguments [Boolean] :should_trim_fields If the format is `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them. + # If this parameter is not specified and the delimiter is pipe (`|`), the default value is true. + # Otherwise, the default value is `false`. # @option arguments [Time] :timeout The maximum amount of time that the structure analysis can take. # If the analysis is still running when the timeout expires, it will be stopped. Server default: 25s. # @option arguments [String] :timestamp_field The name of the field that contains the primary timestamp of each record in the text. - # In particular, if the text was ingested into an index, this is the field that would be used to populate the +@timestamp+ field.If the format is +semi_structured_text+, this field must match the name of the appropriate extraction in the +grok_pattern+. - # Therefore, for semi-structured text, it is best not to specify this parameter unless +grok_pattern+ is also specified.For structured text, if you specify this parameter, the field must exist within the text.If this parameter is not specified, the structure finder makes a decision about which field (if any) is the primary timestamp field. + # In particular, if the text was ingested into an index, this is the field that would be used to populate the `@timestamp` field.If the format is `semi_structured_text`, this field must match the name of the appropriate extraction in the `grok_pattern`. + # Therefore, for semi-structured text, it is best not to specify this parameter unless `grok_pattern` is also specified.For structured text, if you specify this parameter, the field must exist within the text.If this parameter is not specified, the structure finder makes a decision about which field (if any) is the primary timestamp field. # For structured text, it is not compulsory to have a timestamp in the text. # @option arguments [String] :timestamp_format The Java time format of the timestamp field in the text. # Only a subset of Java time format letter groups are supported: - # - +a+ - # - +d+ - # - +dd+ - # - +EEE+ - # - +EEEE+ - # - +H+ - # - +HH+ - # - +h+ - # - +M+ - # - +MM+ - # - +MMM+ - # - +MMMM+ - # - +mm+ - # - +ss+ - # - +XX+ - # - +XXX+ - # - +yy+ - # - +yyyy+ - # - +zzz+ - # Additionally +S+ letter groups (fractional seconds) of length one to nine are supported providing they occur after +ss+ and are separated from the +ss+ by a period (+.+), comma (+,+), or colon (+:+). - # Spacing and punctuation is also permitted with the exception a question mark (+?+), newline, and carriage return, together with literal text enclosed in single quotes. - # For example, +MM/dd HH.mm.ss,SSSSSS 'in' yyyy+ is a valid override format.One valuable use case for this parameter is when the format is semi-structured text, there are multiple timestamp formats in the text, and you know which format corresponds to the primary timestamp, but you do not want to specify the full +grok_pattern+. 
- # Another is when the timestamp format is one that the structure finder does not consider by default.If this parameter is not specified, the structure finder chooses the best format from a built-in set.If the special value +null+ is specified, the structure finder will not look for a primary timestamp in the text. + # - `a` + # - `d` + # - `dd` + # - `EEE` + # - `EEEE` + # - `H` + # - `HH` + # - `h` + # - `M` + # - `MM` + # - `MMM` + # - `MMMM` + # - `mm` + # - `ss` + # - `XX` + # - `XXX` + # - `yy` + # - `yyyy` + # - `zzz` + # Additionally `S` letter groups (fractional seconds) of length one to nine are supported providing they occur after `ss` and are separated from the `ss` by a period (`.`), comma (`,`), or colon (`:`). + # Spacing and punctuation is also permitted with the exception a question mark (`?`), newline, and carriage return, together with literal text enclosed in single quotes. + # For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override format.One valuable use case for this parameter is when the format is semi-structured text, there are multiple timestamp formats in the text, and you know which format corresponds to the primary timestamp, but you do not want to specify the full `grok_pattern`. + # Another is when the timestamp format is one that the structure finder does not consider by default.If this parameter is not specified, the structure finder chooses the best format from a built-in set.If the special value `null` is specified, the structure finder will not look for a primary timestamp in the text. # When the format is semi-structured text, this will result in the structure finder treating the text as single-line messages. # @option arguments [Hash] :headers Custom HTTP headers # @option arguments [Hash] :body request body diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/text_structure/find_structure.rb b/elasticsearch-api/lib/elasticsearch/api/actions/text_structure/find_structure.rb index b5fa98ab50..489ce1d202 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/text_structure/find_structure.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/text_structure/find_structure.rb @@ -38,33 +38,33 @@ module Actions # # @option arguments [String] :charset The text's character set. # It must be a character set that is supported by the JVM that Elasticsearch uses. - # For example, +UTF-8+, +UTF-16LE+, +windows-1252+, or +EUC-JP+. + # For example, `UTF-8`, `UTF-16LE`, `windows-1252`, or `EUC-JP`. # If this parameter is not specified, the structure finder chooses an appropriate character set. - # @option arguments [String] :column_names If you have set format to +delimited+, you can specify the column names in a comma-separated list. + # @option arguments [String] :column_names If you have set format to `delimited`, you can specify the column names in a comma-separated list. # If this parameter is not specified, the structure finder uses the column names from the header row of the text. # If the text does not have a header role, columns are named "column1", "column2", "column3", for example. - # @option arguments [String] :delimiter If you have set +format+ to +delimited+, you can specify the character used to delimit the values in each row. + # @option arguments [String] :delimiter If you have set `format` to `delimited`, you can specify the character used to delimit the values in each row. # Only a single character is supported; the delimiter cannot have multiple characters. 
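Editor's note, not part of the patch: a minimal sketch of how the `find_message_structure` action documented in the hunk above might be called through the Ruby client. The client setup, the sample log messages, and the `messages` body key follow the upstream Elasticsearch API and are illustrative assumptions; only the action name and the documented query parameters come from the files touched here.

```ruby
require 'elasticsearch'

# Hypothetical client; adjust URL and credentials for your cluster.
client = Elasticsearch::Client.new(url: 'http://localhost:9200')

response = client.text_structure.find_message_structure(
  ecs_compatibility: 'v1', # prefer ECS Grok patterns over legacy ones
  explain: true,           # ask for the "explanation" array described above
  body: {
    messages: [
      '2025-05-07 16:23:38,123 INFO  [main] Service started',
      '2025-05-07 16:23:39,456 WARN  [main] Disk usage above threshold'
    ]
  }
)

# For semi-structured text, the response includes the Grok pattern the
# structure finder derived.
puts response['grok_pattern']
```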
- # By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (+|+). + # By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (`|`). # In this default scenario, all rows must have the same number of fields for the delimited format to be detected. # If you specify a delimiter, up to 10% of the rows can have a different number of columns than the first row. # @option arguments [String] :ecs_compatibility The mode of compatibility with ECS compliant Grok patterns. # Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern. - # Valid values are +disabled+ and +v1+. - # This setting primarily has an impact when a whole message Grok pattern such as +%{CATALINALOG}+ matches the input. - # If the structure finder identifies a common structure but has no idea of meaning then generic field names such as +path+, +ipaddress+, +field1+, and +field2+ are used in the +grok_pattern+ output, with the intention that a user who knows the meanings rename these fields before using it. Server default: disabled. - # @option arguments [Boolean] :explain If this parameter is set to +true+, the response includes a field named explanation, which is an array of strings that indicate how the structure finder produced its result. + # Valid values are `disabled` and `v1`. + # This setting primarily has an impact when a whole message Grok pattern such as `%{CATALINALOG}` matches the input. + # If the structure finder identifies a common structure but has no idea of meaning then generic field names such as `path`, `ipaddress`, `field1`, and `field2` are used in the `grok_pattern` output, with the intention that a user who knows the meanings rename these fields before using it. Server default: disabled. + # @option arguments [Boolean] :explain If this parameter is set to `true`, the response includes a field named explanation, which is an array of strings that indicate how the structure finder produced its result. # If the structure finder produces unexpected results for some text, use this query parameter to help you determine why the returned structure was chosen. # @option arguments [String] :format The high level structure of the text. - # Valid values are +ndjson+, +xml+, +delimited+, and +semi_structured_text+. + # Valid values are `ndjson`, `xml`, `delimited`, and `semi_structured_text`. # By default, the API chooses the format. # In this default scenario, all rows must have the same number of fields for a delimited format to be detected. - # If the format is set to +delimited+ and the delimiter is not set, however, the API tolerates up to 5% of rows that have a different number of columns than the first row. - # @option arguments [String] :grok_pattern If you have set +format+ to +semi_structured_text+, you can specify a Grok pattern that is used to extract fields from every message in the text. - # The name of the timestamp field in the Grok pattern must match what is specified in the +timestamp_field+ parameter. + # If the format is set to `delimited` and the delimiter is not set, however, the API tolerates up to 5% of rows that have a different number of columns than the first row. + # @option arguments [String] :grok_pattern If you have set `format` to `semi_structured_text`, you can specify a Grok pattern that is used to extract fields from every message in the text. 
+ # The name of the timestamp field in the Grok pattern must match what is specified in the `timestamp_field` parameter. # If that parameter is not specified, the name of the timestamp field in the Grok pattern must match "timestamp". - # If +grok_pattern+ is not specified, the structure finder creates a Grok pattern. - # @option arguments [Boolean] :has_header_row If you have set +format+ to +delimited+, you can use this parameter to indicate whether the column names are in the first row of the text. + # If `grok_pattern` is not specified, the structure finder creates a Grok pattern. + # @option arguments [Boolean] :has_header_row If you have set `format` to `delimited`, you can use this parameter to indicate whether the column names are in the first row of the text. # If this parameter is not specified, the structure finder guesses based on the similarity of the first row of the text to other rows. # @option arguments [Integer] :line_merge_size_limit The maximum number of characters in a message when lines are merged to form messages while analyzing semi-structured text. # If you have extremely long messages you may need to increase this, but be aware that this may lead to very long processing times if the way to group lines into messages is misdetected. Server default: 10000. @@ -73,43 +73,43 @@ module Actions # If the value of this parameter is greater than the number of lines in the text, the analysis proceeds (as long as there are at least two lines in the text) for all of the lines.NOTE: The number of lines and the variation of the lines affects the speed of the analysis. # For example, if you upload text where the first 1000 lines are all variations on the same message, the analysis will find more commonality than would be seen with a bigger sample. # If possible, however, it is more efficient to upload sample text with more variety in the first 1000 lines than to request analysis of 100000 lines to achieve some variety. Server default: 1000. - # @option arguments [String] :quote If you have set +format+ to +delimited+, you can specify the character used to quote the values in each row if they contain newlines or the delimiter character. + # @option arguments [String] :quote If you have set `format` to `delimited`, you can specify the character used to quote the values in each row if they contain newlines or the delimiter character. # Only a single character is supported. - # If this parameter is not specified, the default value is a double quote (+"+). + # If this parameter is not specified, the default value is a double quote (`"`). # If your delimited text format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample. - # @option arguments [Boolean] :should_trim_fields If you have set +format+ to +delimited+, you can specify whether values between delimiters should have whitespace trimmed from them. - # If this parameter is not specified and the delimiter is pipe (+|+), the default value is +true+. - # Otherwise, the default value is +false+. + # @option arguments [Boolean] :should_trim_fields If you have set `format` to `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them. + # If this parameter is not specified and the delimiter is pipe (`|`), the default value is `true`. + # Otherwise, the default value is `false`. # @option arguments [Time] :timeout The maximum amount of time that the structure analysis can take. 
# If the analysis is still running when the timeout expires then it will be stopped. Server default: 25s. # @option arguments [String] :timestamp_field The name of the field that contains the primary timestamp of each record in the text. - # In particular, if the text were ingested into an index, this is the field that would be used to populate the +@timestamp+ field.If the +format+ is +semi_structured_text+, this field must match the name of the appropriate extraction in the +grok_pattern+. - # Therefore, for semi-structured text, it is best not to specify this parameter unless +grok_pattern+ is also specified.For structured text, if you specify this parameter, the field must exist within the text.If this parameter is not specified, the structure finder makes a decision about which field (if any) is the primary timestamp field. + # In particular, if the text were ingested into an index, this is the field that would be used to populate the `@timestamp` field.If the `format` is `semi_structured_text`, this field must match the name of the appropriate extraction in the `grok_pattern`. + # Therefore, for semi-structured text, it is best not to specify this parameter unless `grok_pattern` is also specified.For structured text, if you specify this parameter, the field must exist within the text.If this parameter is not specified, the structure finder makes a decision about which field (if any) is the primary timestamp field. # For structured text, it is not compulsory to have a timestamp in the text. # @option arguments [String] :timestamp_format The Java time format of the timestamp field in the text.Only a subset of Java time format letter groups are supported: - # - +a+ - # - +d+ - # - +dd+ - # - +EEE+ - # - +EEEE+ - # - +H+ - # - +HH+ - # - +h+ - # - +M+ - # - +MM+ - # - +MMM+ - # - +MMMM+ - # - +mm+ - # - +ss+ - # - +XX+ - # - +XXX+ - # - +yy+ - # - +yyyy+ - # - +zzz+ - # Additionally +S+ letter groups (fractional seconds) of length one to nine are supported providing they occur after +ss+ and separated from the +ss+ by a +.+, +,+ or +:+. - # Spacing and punctuation is also permitted with the exception of +?+, newline and carriage return, together with literal text enclosed in single quotes. - # For example, +MM/dd HH.mm.ss,SSSSSS 'in' yyyy+ is a valid override format.One valuable use case for this parameter is when the format is semi-structured text, there are multiple timestamp formats in the text, and you know which format corresponds to the primary timestamp, but you do not want to specify the full +grok_pattern+. - # Another is when the timestamp format is one that the structure finder does not consider by default.If this parameter is not specified, the structure finder chooses the best format from a built-in set.If the special value +null+ is specified the structure finder will not look for a primary timestamp in the text. + # - `a` + # - `d` + # - `dd` + # - `EEE` + # - `EEEE` + # - `H` + # - `HH` + # - `h` + # - `M` + # - `MM` + # - `MMM` + # - `MMMM` + # - `mm` + # - `ss` + # - `XX` + # - `XXX` + # - `yy` + # - `yyyy` + # - `zzz` + # Additionally `S` letter groups (fractional seconds) of length one to nine are supported providing they occur after `ss` and separated from the `ss` by a `.`, `,` or `:`. + # Spacing and punctuation is also permitted with the exception of `?`, newline and carriage return, together with literal text enclosed in single quotes. 
+ # For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override format.One valuable use case for this parameter is when the format is semi-structured text, there are multiple timestamp formats in the text, and you know which format corresponds to the primary timestamp, but you do not want to specify the full `grok_pattern`. + # Another is when the timestamp format is one that the structure finder does not consider by default.If this parameter is not specified, the structure finder chooses the best format from a built-in set.If the special value `null` is specified the structure finder will not look for a primary timestamp in the text. # When the format is semi-structured text this will result in the structure finder treating the text as single-line messages. # @option arguments [Hash] :headers Custom HTTP headers # @option arguments [Hash] :body text_files diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/text_structure/test_grok_pattern.rb b/elasticsearch-api/lib/elasticsearch/api/actions/text_structure/test_grok_pattern.rb index df8bdbde39..27deda610f 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/text_structure/test_grok_pattern.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/text_structure/test_grok_pattern.rb @@ -28,7 +28,7 @@ module Actions # # @option arguments [String] :ecs_compatibility The mode of compatibility with ECS compliant Grok patterns. # Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern. - # Valid values are +disabled+ and +v1+. Server default: disabled. + # Valid values are `disabled` and `v1`. Server default: disabled. # @option arguments [Hash] :headers Custom HTTP headers # @option arguments [Hash] :body request body # diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/transform/get_transform.rb b/elasticsearch-api/lib/elasticsearch/api/actions/transform/get_transform.rb index 91dcc59eb9..1160326d91 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/transform/get_transform.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/transform/get_transform.rb @@ -27,8 +27,8 @@ module Actions # # @option arguments [String, Array] :transform_id Identifier for the transform. It can be a transform identifier or a # wildcard expression. You can get information for all transforms by using - # +_all+, by specifying +*+ as the ++, or by omitting the - # ++. + # `_all`, by specifying `*` as the ``, or by omitting the + # ``. # @option arguments [Boolean] :allow_no_match Specifies what to do when the request: # - Contains wildcard expressions and there are no transforms that match. # - Contains the _all string or no identifiers and there are no matches. diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/transform/get_transform_stats.rb b/elasticsearch-api/lib/elasticsearch/api/actions/transform/get_transform_stats.rb index 3b95969bc6..7453b289e6 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/transform/get_transform_stats.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/transform/get_transform_stats.rb @@ -27,8 +27,8 @@ module Actions # # @option arguments [String, Array] :transform_id Identifier for the transform. It can be a transform identifier or a # wildcard expression. You can get information for all transforms by using - # +_all+, by specifying +*+ as the ++, or by omitting the - # ++. (*Required*) + # `_all`, by specifying `*` as the ``, or by omitting the + # ``. 
(*Required*) # @option arguments [Boolean] :allow_no_match Specifies what to do when the request: # - Contains wildcard expressions and there are no transforms that match. # - Contains the _all string or no identifiers and there are no matches. diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/transform/put_transform.rb b/elasticsearch-api/lib/elasticsearch/api/actions/transform/put_transform.rb index c8c4f3e32c..9d3da297e5 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/transform/put_transform.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/transform/put_transform.rb @@ -28,18 +28,18 @@ module Actions # a data frame). The ID for each document in the data frame is generated from a hash of the entity, so there is a # unique row per entity. # You must choose either the latest or pivot method for your transform; you cannot use both in a single transform. If - # you choose to use the pivot method for your transform, the entities are defined by the set of +group_by+ fields in - # the pivot object. If you choose to use the latest method, the entities are defined by the +unique_key+ field values + # you choose to use the pivot method for your transform, the entities are defined by the set of `group_by` fields in + # the pivot object. If you choose to use the latest method, the entities are defined by the `unique_key` field values # in the latest object. - # You must have +create_index+, +index+, and +read+ privileges on the destination index and +read+ and - # +view_index_metadata+ privileges on the source indices. When Elasticsearch security features are enabled, the + # You must have `create_index`, `index`, and `read` privileges on the destination index and `read` and + # `view_index_metadata` privileges on the source indices. When Elasticsearch security features are enabled, the # transform remembers which roles the user that created it had at the time of creation and uses those same roles. If # those roles do not have the required privileges on the source and destination indices, the transform fails when it # attempts unauthorized operations. # NOTE: You must use Kibana or this API to create a transform. Do not add a transform directly into any - # +.transform-internal*+ indices using the Elasticsearch index API. If Elasticsearch security features are enabled, do - # not give users any privileges on +.transform-internal*+ indices. If you used transforms prior to 7.5, also do not - # give users any privileges on +.data-frame-internal*+ indices. + # `.transform-internal*` indices using the Elasticsearch index API. If Elasticsearch security features are enabled, do + # not give users any privileges on `.transform-internal*` indices. If you used transforms prior to 7.5, also do not + # give users any privileges on `.data-frame-internal*` indices. # # @option arguments [String] :transform_id Identifier for the transform. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), # hyphens, and underscores. It has a 64 character limit and must start and end with alphanumeric characters. (*Required*) diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/transform/reset_transform.rb b/elasticsearch-api/lib/elasticsearch/api/actions/transform/reset_transform.rb index 5bb1dc9e28..5348f512b0 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/transform/reset_transform.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/transform/reset_transform.rb @@ -23,12 +23,12 @@ module API module Transform module Actions # Reset a transform. 
- # Before you can reset it, you must stop it; alternatively, use the +force+ query parameter. + # Before you can reset it, you must stop it; alternatively, use the `force` query parameter. # If the destination index was created by the transform, it is deleted. # # @option arguments [String] :transform_id Identifier for the transform. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), # hyphens, and underscores. It has a 64 character limit and must start and end with alphanumeric characters. (*Required*) - # @option arguments [Boolean] :force If this value is +true+, the transform is reset regardless of its current state. If it's +false+, the transform + # @option arguments [Boolean] :force If this value is `true`, the transform is reset regardless of its current state. If it's `false`, the transform # must be stopped before it can be reset. # @option arguments [Time] :timeout Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. Server default: 30s. # @option arguments [Hash] :headers Custom HTTP headers diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/transform/schedule_now_transform.rb b/elasticsearch-api/lib/elasticsearch/api/actions/transform/schedule_now_transform.rb index 60e9630bf9..44724eec10 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/transform/schedule_now_transform.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/transform/schedule_now_transform.rb @@ -26,7 +26,7 @@ module Actions # Instantly run a transform to process data. # If you run this API, the transform will process the new data instantly, # without waiting for the configured frequency interval. After the API is called, - # the transform will be processed again at +now + frequency+ unless the API + # the transform will be processed again at `now + frequency` unless the API # is called again in the meantime. # # @option arguments [String] :transform_id Identifier for the transform. (*Required*) diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/transform/start_transform.rb b/elasticsearch-api/lib/elasticsearch/api/actions/transform/start_transform.rb index 48fea92273..a212637a6e 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/transform/start_transform.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/transform/start_transform.rb @@ -23,10 +23,10 @@ module API module Transform module Actions # Start a transform. - # When you start a transform, it creates the destination index if it does not already exist. The +number_of_shards+ is - # set to +1+ and the +auto_expand_replicas+ is set to +0-1+. If it is a pivot transform, it deduces the mapping + # When you start a transform, it creates the destination index if it does not already exist. The `number_of_shards` is + # set to `1` and the `auto_expand_replicas` is set to `0-1`. If it is a pivot transform, it deduces the mapping # definitions for the destination index from the source indices and the transform aggregations. If fields in the - # destination index are derived from scripts (as in the case of +scripted_metric+ or +bucket_script+ aggregations), + # destination index are derived from scripts (as in the case of `scripted_metric` or `bucket_script` aggregations), # the transform uses dynamic mappings unless an index template exists. If it is a latest transform, it does not deduce # mapping definitions; it uses dynamic mappings. 
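Editor's note, not part of the patch: a rough sketch of exercising the `put_transform` and `start_transform` actions documented above from the Ruby client. The index names and the pivot definition are invented; the privilege and destination-index behaviour is as described in the documentation hunks.

```ruby
# Create a pivot transform; entities are defined by the group_by fields.
client.transform.put_transform(
  transform_id: 'orders-by-customer', # lowercase alphanumerics, hyphens, underscores; 64-char limit
  body: {
    source: { index: 'orders' },
    dest:   { index: 'orders-by-customer' },
    pivot:  {
      group_by:     { customer_id: { terms: { field: 'customer_id' } } },
      aggregations: { total_spend: { sum: { field: 'price' } } }
    }
  }
)

# Starting the transform creates the destination index if it does not already
# exist, as described in the start_transform hunk above.
client.transform.start_transform(transform_id: 'orders-by-customer')
```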
To use explicit mappings, create the destination index before you # start the transform. Alternatively, you can create an index template, though it does not affect the deduced mappings diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/transform/stop_transform.rb b/elasticsearch-api/lib/elasticsearch/api/actions/transform/stop_transform.rb index 474a880bc5..5904dd23ac 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/transform/stop_transform.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/transform/stop_transform.rb @@ -26,13 +26,13 @@ module Actions # Stops one or more transforms. # # @option arguments [String] :transform_id Identifier for the transform. To stop multiple transforms, use a comma-separated list or a wildcard expression. - # To stop all transforms, use +_all+ or +*+ as the identifier. (*Required*) + # To stop all transforms, use `_all` or `*` as the identifier. (*Required*) # @option arguments [Boolean] :allow_no_match Specifies what to do when the request: contains wildcard expressions and there are no transforms that match; - # contains the +_all+ string or no identifiers and there are no matches; contains wildcard expressions and there + # contains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there # are only partial matches.If it is true, the API returns a successful acknowledgement message when there are no matches. When there are # only partial matches, the API stops the appropriate transforms.If it is false, the request returns a 404 status code when there are no matches or only partial matches. Server default: true. # @option arguments [Boolean] :force If it is true, the API forcefully stops the transforms. - # @option arguments [Time] :timeout Period to wait for a response when +wait_for_completion+ is +true+. If no response is received before the + # @option arguments [Time] :timeout Period to wait for a response when `wait_for_completion` is `true`. If no response is received before the # timeout expires, the request returns a timeout exception. However, the request continues processing and # eventually moves the transform to a STOPPED state. Server default: 30s. # @option arguments [Boolean] :wait_for_checkpoint If it is true, the transform does not completely stop until the current checkpoint is completed. If it is false, diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/transform/update_transform.rb b/elasticsearch-api/lib/elasticsearch/api/actions/transform/update_transform.rb index 014ecda2aa..02af1f0422 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/transform/update_transform.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/transform/update_transform.rb @@ -24,9 +24,9 @@ module Transform module Actions # Update a transform. # Updates certain properties of a transform. - # All updated properties except +description+ do not take effect until after the transform starts the next checkpoint, - # thus there is data consistency in each checkpoint. To use this API, you must have +read+ and +view_index_metadata+ - # privileges for the source indices. You must also have +index+ and +read+ privileges for the destination index. When + # All updated properties except `description` do not take effect until after the transform starts the next checkpoint, + # thus there is data consistency in each checkpoint. To use this API, you must have `read` and `view_index_metadata` + # privileges for the source indices. 
You must also have `index` and `read` privileges for the destination index. When # Elasticsearch security features are enabled, the transform remembers which roles the user who updated it had at the # time of update and runs with those privileges. # diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/update.rb b/elasticsearch-api/lib/elasticsearch/api/actions/update.rb index cd71f18ea4..6226f2ee97 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/update.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/update.rb @@ -23,7 +23,7 @@ module API module Actions # Update a document. # Update a document by running a script or passing a partial document. - # If the Elasticsearch security features are enabled, you must have the +index+ or +write+ index privilege for the target index or index alias. + # If the Elasticsearch security features are enabled, you must have the `index` or `write` index privilege for the target index or index alias. # The script can update, delete, or skip modifying the document. # The API also supports passing a partial document, which is merged into the existing document. # To fully replace an existing document, use the index API. @@ -32,8 +32,8 @@ module Actions # * Runs the specified script. # * Indexes the result. # The document must still be reindexed, but using this API removes some network roundtrips and reduces chances of version conflicts between the GET and the index operation. - # The +_source+ field must be enabled to use this API. - # In addition to +_source+, you can access the following variables through the +ctx+ map: +_index+, +_type+, +_id+, +_version+, +_routing+, and +_now+ (the current timestamp). + # The `_source` field must be enabled to use this API. + # In addition to `_source`, you can access the following variables through the `ctx` map: `_index`, `_type`, `_id`, `_version`, `_routing`, and `_now` (the current timestamp). # # @option arguments [String] :id A unique identifier for the document to be updated. (*Required*) # @option arguments [String] :index The name of the target index. @@ -45,16 +45,16 @@ module Actions # @option arguments [String] :refresh If 'true', Elasticsearch refreshes the affected shards to make this operation visible to search. # If 'wait_for', it waits for a refresh to make this operation visible to search. # If 'false', it does nothing with refreshes. Server default: false. - # @option arguments [Boolean] :require_alias If +true+, the destination must be an index alias. + # @option arguments [Boolean] :require_alias If `true`, the destination must be an index alias. # @option arguments [Integer] :retry_on_conflict The number of times the operation should be retried when a conflict occurs. Server default: 0. # @option arguments [String] :routing A custom value used to route operations to a specific shard. # @option arguments [Time] :timeout The period to wait for the following operations: dynamic mapping updates and waiting for active shards. # Elasticsearch waits for at least the timeout period before failing. # The actual wait time could be longer, particularly when multiple waits occur. Server default: 1m. # @option arguments [Integer, String] :wait_for_active_shards The number of copies of each shard that must be active before proceeding with the operation. - # Set to 'all' or any positive integer up to the total number of shards in the index (+number_of_replicas++1). - # The default value of +1+ means it waits for each primary shard to be active. Server default: 1. 
- # @option arguments [Boolean, String, Array] :_source If +false+, source retrieval is turned off. + # Set to 'all' or any positive integer up to the total number of shards in the index (`number_of_replicas`+1). + # The default value of `1` means it waits for each primary shard to be active. Server default: 1. + # @option arguments [Boolean, String, Array] :_source If `false`, source retrieval is turned off. # You can also specify a comma-separated list of the fields you want to retrieve. Server default: true. # @option arguments [String, Array] :_source_excludes The source fields you want to exclude. # @option arguments [String, Array] :_source_includes The source fields you want to retrieve. diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/update_by_query.rb b/elasticsearch-api/lib/elasticsearch/api/actions/update_by_query.rb index 0abbf268ca..b24bc8c149 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/update_by_query.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/update_by_query.rb @@ -25,46 +25,48 @@ module Actions # Updates documents that match the specified query. # If no query is specified, performs an update on every document in the data stream or index without modifying the source, which is useful for picking up mapping changes. # If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or alias: - # * +read+ - # * +index+ or +write+ + # * `read` + # * `index` or `write` # You can specify the query criteria in the request URI or the request body using the same syntax as the search API. # When you submit an update by query request, Elasticsearch gets a snapshot of the data stream or index when it begins processing the request and updates matching documents using internal versioning. # When the versions match, the document is updated and the version number is incremented. # If a document changes between the time that the snapshot is taken and the update operation is processed, it results in a version conflict and the operation fails. - # You can opt to count version conflicts instead of halting and returning by setting +conflicts+ to +proceed+. - # Note that if you opt to count version conflicts, the operation could attempt to update more documents from the source than +max_docs+ until it has successfully updated +max_docs+ documents or it has gone through every document in the source query. + # You can opt to count version conflicts instead of halting and returning by setting `conflicts` to `proceed`. + # Note that if you opt to count version conflicts, the operation could attempt to update more documents from the source than `max_docs` until it has successfully updated `max_docs` documents or it has gone through every document in the source query. # NOTE: Documents with a version equal to 0 cannot be updated using update by query because internal versioning does not support 0 as a valid version number. # While processing an update by query request, Elasticsearch performs multiple search requests sequentially to find all of the matching documents. # A bulk update request is performed for each batch of matching documents. # Any query or update failures cause the update by query request to fail and the failures are shown in the response. # Any update requests that completed successfully still stick, they are not rolled back. 
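Editor's note, not part of the patch: the scripted partial update described in the update.rb hunk above could look roughly like this from the Ruby client. The index, document id, and counter field are placeholders; the `ctx._source` access mirrors the documented behaviour.

```ruby
client.update(
  index: 'my-index',                      # hypothetical target index
  id: '1',
  retry_on_conflict: 3,                   # documented :retry_on_conflict option
  body: {
    script: {
      source: 'ctx._source.counter += params.step',
      params: { step: 1 }
    },
    upsert: { counter: 1 }                # used when the document does not exist yet
  }
)
```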
# **Throttling update requests** - # To control the rate at which update by query issues batches of update operations, you can set +requests_per_second+ to any positive decimal number. + # To control the rate at which update by query issues batches of update operations, you can set `requests_per_second` to any positive decimal number. # This pads each batch with a wait time to throttle the rate. - # Set +requests_per_second+ to +-1+ to turn off throttling. + # Set `requests_per_second` to `-1` to turn off throttling. # Throttling uses a wait time between batches so that the internal scroll requests can be given a timeout that takes the request padding into account. - # The padding time is the difference between the batch size divided by the +requests_per_second+ and the time spent writing. - # By default the batch size is 1000, so if +requests_per_second+ is set to +500+: - # + + # The padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing. + # By default the batch size is 1000, so if `requests_per_second` is set to `500`: + # + # ``` # target_time = 1000 / 500 per second = 2 seconds # wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds - # + + # ``` + # # Since the batch is issued as a single _bulk request, large batch sizes cause Elasticsearch to create many requests and wait before starting the next set. # This is "bursty" instead of "smooth". # **Slicing** # Update by query supports sliced scroll to parallelize the update process. # This can improve efficiency and provide a convenient way to break the request down into smaller parts. - # Setting +slices+ to +auto+ chooses a reasonable number for most data streams and indices. + # Setting `slices` to `auto` chooses a reasonable number for most data streams and indices. # This setting will use one slice per shard, up to a certain limit. # If there are multiple source data streams or indices, it will choose the number of slices based on the index or backing index with the smallest number of shards. - # Adding +slices+ to +_update_by_query+ just automates the manual process of creating sub-requests, which means it has some quirks: + # Adding `slices` to `_update_by_query` just automates the manual process of creating sub-requests, which means it has some quirks: # * You can see these requests in the tasks APIs. These sub-requests are "child" tasks of the task for the request with slices. - # * Fetching the status of the task for the request with +slices+ only contains the status of completed slices. + # * Fetching the status of the task for the request with `slices` only contains the status of completed slices. # * These sub-requests are individually addressable for things like cancellation and rethrottling. - # * Rethrottling the request with +slices+ will rethrottle the unfinished sub-request proportionally. + # * Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally. # * Canceling the request with slices will cancel each sub-request. # * Due to the nature of slices each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution. - # * Parameters like +requests_per_second+ and +max_docs+ on a request with slices are distributed proportionally to each sub-request. 
Combine that with the point above about distribution being uneven and you should conclude that using +max_docs+ with +slices+ might not result in exactly +max_docs+ documents being updated. + # * Parameters like `requests_per_second` and `max_docs` on a request with slices are distributed proportionally to each sub-request. Combine that with the point above about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being updated. # * Each sub-request gets a slightly different snapshot of the source data stream or index though these are all taken at approximately the same time. # If you're slicing manually or otherwise tuning automatic slicing, keep in mind that: # * Query performance is most efficient when the number of slices is equal to the number of shards in the index or backing index. If that number is large (for example, 500), choose a lower number as too many slices hurts performance. Setting slices higher than the number of shards generally does not improve efficiency and adds overhead. @@ -72,51 +74,51 @@ module Actions # Whether query or update performance dominates the runtime depends on the documents being reindexed and cluster resources. # **Update the document source** # Update by query supports scripts to update the document source. - # As with the update API, you can set +ctx.op+ to change the operation that is performed. - # Set +ctx.op = "noop"+ if your script decides that it doesn't have to make any changes. - # The update by query operation skips updating the document and increments the +noop+ counter. - # Set +ctx.op = "delete"+ if your script decides that the document should be deleted. - # The update by query operation deletes the document and increments the +deleted+ counter. - # Update by query supports only +index+, +noop+, and +delete+. - # Setting +ctx.op+ to anything else is an error. - # Setting any other field in +ctx+ is an error. + # As with the update API, you can set `ctx.op` to change the operation that is performed. + # Set `ctx.op = "noop"` if your script decides that it doesn't have to make any changes. + # The update by query operation skips updating the document and increments the `noop` counter. + # Set `ctx.op = "delete"` if your script decides that the document should be deleted. + # The update by query operation deletes the document and increments the `deleted` counter. + # Update by query supports only `index`, `noop`, and `delete`. + # Setting `ctx.op` to anything else is an error. + # Setting any other field in `ctx` is an error. # This API enables you to only modify the source of matching documents; you cannot move them. # # @option arguments [String, Array] :index A comma-separated list of data streams, indices, and aliases to search. - # It supports wildcards (+*+). - # To search all data streams or indices, omit this parameter or use +*+ or +_all+. (*Required*) - # @option arguments [Boolean] :allow_no_indices If +false+, the request returns an error if any wildcard expression, index alias, or +_all+ value targets only missing or closed indices. + # It supports wildcards (`*`). + # To search all data streams or indices, omit this parameter or use `*` or `_all`. (*Required*) + # @option arguments [Boolean] :allow_no_indices If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. # This behavior applies even if the request targets other open indices. 
- # For example, a request targeting +foo*,bar*+ returns an error if an index starts with +foo+ but no index starts with +bar+. Server default: true. + # For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. Server default: true. # @option arguments [String] :analyzer The analyzer to use for the query string. - # This parameter can be used only when the +q+ query string parameter is specified. - # @option arguments [Boolean] :analyze_wildcard If +true+, wildcard and prefix queries are analyzed. - # This parameter can be used only when the +q+ query string parameter is specified. - # @option arguments [String] :conflicts The preferred behavior when update by query hits version conflicts: +abort+ or +proceed+. Server default: abort. - # @option arguments [String] :default_operator The default operator for query string query: +AND+ or +OR+. - # This parameter can be used only when the +q+ query string parameter is specified. Server default: OR. + # This parameter can be used only when the `q` query string parameter is specified. + # @option arguments [Boolean] :analyze_wildcard If `true`, wildcard and prefix queries are analyzed. + # This parameter can be used only when the `q` query string parameter is specified. + # @option arguments [String] :conflicts The preferred behavior when update by query hits version conflicts: `abort` or `proceed`. Server default: abort. + # @option arguments [String] :default_operator The default operator for query string query: `AND` or `OR`. + # This parameter can be used only when the `q` query string parameter is specified. Server default: OR. # @option arguments [String] :df The field to use as default where no field prefix is given in the query string. - # This parameter can be used only when the +q+ query string parameter is specified. + # This parameter can be used only when the `q` query string parameter is specified. # @option arguments [String, Array] :expand_wildcards The type of index that wildcard patterns can match. # If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. - # It supports comma-separated values, such as +open,hidden+. - # Valid values are: +all+, +open+, +closed+, +hidden+, +none+. + # It supports comma-separated values, such as `open,hidden`. + # Valid values are: `all`, `open`, `closed`, `hidden`, `none`. # @option arguments [Integer] :from Skips the specified number of documents. Server default: 0. - # @option arguments [Boolean] :ignore_unavailable If +false+, the request returns an error if it targets a missing or closed index. - # @option arguments [Boolean] :lenient If +true+, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. - # This parameter can be used only when the +q+ query string parameter is specified. + # @option arguments [Boolean] :ignore_unavailable If `false`, the request returns an error if it targets a missing or closed index. + # @option arguments [Boolean] :lenient If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. + # This parameter can be used only when the `q` query string parameter is specified. # @option arguments [Integer] :max_docs The maximum number of documents to process. # It defaults to all documents. - # When set to a value less then or equal to +scroll_size+ then a scroll will not be used to retrieve the results for the operation. 
+ # When set to a value less then or equal to `scroll_size` then a scroll will not be used to retrieve the results for the operation. # @option arguments [String] :pipeline The ID of the pipeline to use to preprocess incoming documents. - # If the index has a default ingest pipeline specified, then setting the value to +_none+ disables the default ingest pipeline for this request. + # If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request. # If a final pipeline is configured it will always run, regardless of the value of this parameter. # @option arguments [String] :preference The node or shard the operation should be performed on. # It is random by default. # @option arguments [String] :q A query in the Lucene query string syntax. - # @option arguments [Boolean] :refresh If +true+, Elasticsearch refreshes affected shards to make the operation visible to search after the request completes. - # This is different than the update API's +refresh+ parameter, which causes just the shard that received the request to be refreshed. - # @option arguments [Boolean] :request_cache If +true+, the request cache is used for this request. + # @option arguments [Boolean] :refresh If `true`, Elasticsearch refreshes affected shards to make the operation visible to search after the request completes. + # This is different than the update API's `refresh` parameter, which causes just the shard that received the request to be refreshed. + # @option arguments [Boolean] :request_cache If `true`, the request cache is used for this request. # It defaults to the index-level setting. # @option arguments [Float] :requests_per_second The throttle for this request in sub-requests per second. Server default: -1. # @option arguments [String] :routing A custom value used to route operations to a specific shard. @@ -124,10 +126,10 @@ module Actions # @option arguments [Integer] :scroll_size The size of the scroll request that powers the operation. Server default: 1000. # @option arguments [Time] :search_timeout An explicit timeout for each search request. # By default, there is no timeout. - # @option arguments [String] :search_type The type of the search operation. Available options include +query_then_fetch+ and +dfs_query_then_fetch+. + # @option arguments [String] :search_type The type of the search operation. Available options include `query_then_fetch` and `dfs_query_then_fetch`. # @option arguments [Integer, String] :slices The number of slices this task should be divided into. Server default: 1. # @option arguments [Array] :sort A comma-separated list of : pairs. - # @option arguments [Array] :stats The specific +tag+ of the request for logging and statistical purposes. + # @option arguments [Array] :stats The specific `tag` of the request for logging and statistical purposes. # @option arguments [Integer] :terminate_after The maximum number of documents to collect for each shard. # If a query reaches this limit, Elasticsearch terminates the query early. # Elasticsearch collects documents before sorting.IMPORTANT: Use with caution. @@ -138,15 +140,15 @@ module Actions # By default, it is one minute. # This guarantees Elasticsearch waits for at least the timeout before failing. # The actual wait time could be longer, particularly when multiple waits occur. Server default: 1m. - # @option arguments [Boolean] :version If +true+, returns the document version as part of a hit. 
+ # @option arguments [Boolean] :version If `true`, returns the document version as part of a hit. # @option arguments [Boolean] :version_type Should the document increment the version number (internal) on hit or not (reindex) # @option arguments [Integer, String] :wait_for_active_shards The number of shard copies that must be active before proceeding with the operation. - # Set to +all+ or any positive integer up to the total number of shards in the index (+number_of_replicas+1+). - # The +timeout+ parameter controls how long each write request waits for unavailable shards to become available. + # Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). + # The `timeout` parameter controls how long each write request waits for unavailable shards to become available. # Both work exactly the way they work in the bulk API. Server default: 1. - # @option arguments [Boolean] :wait_for_completion If +true+, the request blocks until the operation is complete. - # If +false+, Elasticsearch performs some preflight checks, launches the request, and returns a task ID that you can use to cancel or get the status of the task. - # Elasticsearch creates a record of this task as a document at +.tasks/task/${taskId}+. Server default: true. + # @option arguments [Boolean] :wait_for_completion If `true`, the request blocks until the operation is complete. + # If `false`, Elasticsearch performs some preflight checks, launches the request, and returns a task ID that you can use to cancel or get the status of the task. + # Elasticsearch creates a record of this task as a document at `.tasks/task/${taskId}`. Server default: true. # @option arguments [Hash] :headers Custom HTTP headers # @option arguments [Hash] :body request body # diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/update_by_query_rethrottle.rb b/elasticsearch-api/lib/elasticsearch/api/actions/update_by_query_rethrottle.rb index d01810fb44..bce63b3001 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/update_by_query_rethrottle.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/update_by_query_rethrottle.rb @@ -27,7 +27,7 @@ module Actions # # @option arguments [String] :task_id The ID for the task. (*Required*) # @option arguments [Float] :requests_per_second The throttle for this request in sub-requests per second. - # To turn off throttling, set it to +-1+. Server default: -1. + # To turn off throttling, set it to `-1`. Server default: -1. # @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update-by-query-rethrottle diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/watcher/ack_watch.rb b/elasticsearch-api/lib/elasticsearch/api/actions/watcher/ack_watch.rb index 0597aa10d2..1e7b02c8ca 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/watcher/ack_watch.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/watcher/ack_watch.rb @@ -24,10 +24,10 @@ module Watcher module Actions # Acknowledge a watch. # Acknowledging a watch enables you to manually throttle the execution of the watch's actions. - # The acknowledgement state of an action is stored in the +status.actions..ack.state+ structure. + # The acknowledgement state of an action is stored in the `status.actions..ack.state` structure. 
# IMPORTANT: If the specified watch is currently being executed, this API will return an error # The reason for this behavior is to prevent overwriting the watch status from a watch execution. - # Acknowledging an action throttles further executions of that action until its +ack.state+ is reset to +awaits_successful_execution+. + # Acknowledging an action throttles further executions of that action until its `ack.state` is reset to `awaits_successful_execution`. # This happens when the condition of the watch is not met (the condition evaluates to false). # # @option arguments [String] :watch_id The watch identifier. (*Required*) diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/watcher/delete_watch.rb b/elasticsearch-api/lib/elasticsearch/api/actions/watcher/delete_watch.rb index e30f3324cd..db031d1b38 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/watcher/delete_watch.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/watcher/delete_watch.rb @@ -23,11 +23,11 @@ module API module Watcher module Actions # Delete a watch. - # When the watch is removed, the document representing the watch in the +.watches+ index is gone and it will never be run again. + # When the watch is removed, the document representing the watch in the `.watches` index is gone and it will never be run again. # Deleting a watch does not delete any watch execution records related to this watch from the watch history. # IMPORTANT: Deleting a watch must be done by using only this API. - # Do not delete the watch directly from the +.watches+ index using the Elasticsearch delete document API - # When Elasticsearch security features are enabled, make sure no write privileges are granted to anyone for the +.watches+ index. + # Do not delete the watch directly from the `.watches` index using the Elasticsearch delete document API + # When Elasticsearch security features are enabled, make sure no write privileges are granted to anyone for the `.watches` index. # # @option arguments [String] :id The watch identifier. (*Required*) # @option arguments [Hash] :headers Custom HTTP headers diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/watcher/execute_watch.rb b/elasticsearch-api/lib/elasticsearch/api/actions/watcher/execute_watch.rb index d24e74b3a5..b0eabbb413 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/watcher/execute_watch.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/watcher/execute_watch.rb @@ -30,7 +30,7 @@ module Actions # You can use the run watch API to run watches that are not yet registered by specifying the watch definition inline. # This serves as great tool for testing and debugging your watches prior to adding them to Watcher. # When Elasticsearch security features are enabled on your cluster, watches are run with the privileges of the user that stored the watches. - # If your user is allowed to read index +a+, but not index +b+, then the exact same set of rules will apply during execution of a watch. + # If your user is allowed to read index `a`, but not index `b`, then the exact same set of rules will apply during execution of a watch. # When using the run watch API, the authorization data of the user that called the API will be used as a base, instead of the information who stored the watch. # # @option arguments [String] :id The watch identifier. 
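Editor's note, not part of the patch: a short sketch of running a watch inline and then acknowledging one of its actions, tying together the execute_watch and ack_watch hunks above. The watch and action identifiers are placeholders.

```ruby
# Run the watch immediately and record the execution in the watch history.
client.watcher.execute_watch(
  id: 'cluster_health_watch',
  body: { record_execution: true }
)

# Throttle further runs of a single action until its ack.state is reset.
client.watcher.ack_watch(
  watch_id: 'cluster_health_watch',
  action_id: 'notify_admin'
)
```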
diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/watcher/get_settings.rb b/elasticsearch-api/lib/elasticsearch/api/actions/watcher/get_settings.rb index 4ff78f573a..8e3b80cc1a 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/watcher/get_settings.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/watcher/get_settings.rb @@ -23,8 +23,8 @@ module API module Watcher module Actions # Get Watcher index settings. - # Get settings for the Watcher internal index (+.watches+). - # Only a subset of settings are shown, for example +index.auto_expand_replicas+ and +index.number_of_replicas+. + # Get settings for the Watcher internal index (`.watches`). + # Only a subset of settings are shown, for example `index.auto_expand_replicas` and `index.number_of_replicas`. # # @option arguments [Time] :master_timeout The period to wait for a connection to the master node. # If no response is received before the timeout expires, the request fails and returns an error. diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/watcher/put_watch.rb b/elasticsearch-api/lib/elasticsearch/api/actions/watcher/put_watch.rb index a33053d7d8..1240764ca5 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/watcher/put_watch.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/watcher/put_watch.rb @@ -23,18 +23,18 @@ module API module Watcher module Actions # Create or update a watch. - # When a watch is registered, a new document that represents the watch is added to the +.watches+ index and its trigger is immediately registered with the relevant trigger engine. - # Typically for the +schedule+ trigger, the scheduler is the trigger engine. + # When a watch is registered, a new document that represents the watch is added to the `.watches` index and its trigger is immediately registered with the relevant trigger engine. + # Typically for the `schedule` trigger, the scheduler is the trigger engine. # IMPORTANT: You must use Kibana or this API to create a watch. - # Do not add a watch directly to the +.watches+ index by using the Elasticsearch index API. - # If Elasticsearch security features are enabled, do not give users write privileges on the +.watches+ index. + # Do not add a watch directly to the `.watches` index by using the Elasticsearch index API. + # If Elasticsearch security features are enabled, do not give users write privileges on the `.watches` index. # When you add a watch you can also define its initial active state by setting the *active* parameter. # When Elasticsearch security features are enabled, your watch can index or search only on indices for which the user that stored the watch has privileges. - # If the user is able to read index +a+, but not index +b+, the same will apply when the watch runs. + # If the user is able to read index `a`, but not index `b`, the same will apply when the watch runs. # # @option arguments [String] :id The identifier for the watch. (*Required*) # @option arguments [Boolean] :active The initial state of the watch. - # The default value is +true+, which means the watch is active by default. Server default: true. + # The default value is `true`, which means the watch is active by default. Server default: true. 
# @option arguments [Integer] :if_primary_term only update the watch if the last operation that has changed the watch has the specified primary term # @option arguments [Integer] :if_seq_no only update the watch if the last operation that has changed the watch has the specified sequence number # @option arguments [Integer] :version Explicit version number for concurrency control diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/watcher/query_watches.rb b/elasticsearch-api/lib/elasticsearch/api/actions/watcher/query_watches.rb index 144bbbb326..ead726263d 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/watcher/query_watches.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/watcher/query_watches.rb @@ -24,7 +24,7 @@ module Watcher module Actions # Query watches. # Get all registered watches in a paginated manner and optionally filter watches by a query. - # Note that only the +_id+ and +metadata.*+ fields are queryable or sortable. + # Note that only the `_id` and `metadata.*` fields are queryable or sortable. # # @option arguments [Hash] :headers Custom HTTP headers # @option arguments [Hash] :body request body diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/watcher/stop.rb b/elasticsearch-api/lib/elasticsearch/api/actions/watcher/stop.rb index 915ff2b7ac..1830fd5528 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/watcher/stop.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/watcher/stop.rb @@ -27,7 +27,7 @@ module Actions # # @option arguments [Time] :master_timeout The period to wait for the master node. # If the master node is not available before the timeout expires, the request fails and returns an error. - # To indicate that the request should never timeout, set it to +-1+. Server default: 30s. + # To indicate that the request should never timeout, set it to `-1`. Server default: 30s. # @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-stop diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/watcher/update_settings.rb b/elasticsearch-api/lib/elasticsearch/api/actions/watcher/update_settings.rb index bd205d873b..d900f6c99c 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/watcher/update_settings.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/watcher/update_settings.rb @@ -23,12 +23,12 @@ module API module Watcher module Actions # Update Watcher index settings. - # Update settings for the Watcher internal index (+.watches+). + # Update settings for the Watcher internal index (`.watches`). # Only a subset of settings can be modified. - # This includes +index.auto_expand_replicas+, +index.number_of_replicas+, +index.routing.allocation.exclude.*+, - # +index.routing.allocation.include.*+ and +index.routing.allocation.require.*+. - # Modification of +index.routing.allocation.include._tier_preference+ is an exception and is not allowed as the - # Watcher shards must always be in the +data_content+ tier. + # This includes `index.auto_expand_replicas`, `index.number_of_replicas`, `index.routing.allocation.exclude.*`, + # `index.routing.allocation.include.*` and `index.routing.allocation.require.*`. + # Modification of `index.routing.allocation.include._tier_preference` is an exception and is not allowed as the + # Watcher shards must always be in the `data_content` tier. # # @option arguments [Time] :master_timeout The period to wait for a connection to the master node. 
# If no response is received before the timeout expires, the request fails and returns an error. diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/xpack/info.rb b/elasticsearch-api/lib/elasticsearch/api/actions/xpack/info.rb index 5cb6731ce8..f2ebda5740 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/xpack/info.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/xpack/info.rb @@ -29,7 +29,7 @@ module Actions # * Feature information for the features that are currently enabled and available under the current license. # # @option arguments [Array] :categories A comma-separated list of the information categories to include in the response. - # For example, +build,license,features+. + # For example, `build,license,features`. # @option arguments [Boolean] :accept_enterprise If this param is used it must be set to true # @option arguments [Boolean] :human Defines whether additional human-readable information is included in the response. # In particular, it adds descriptions and a tag line. Server default: true. diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/xpack/usage.rb b/elasticsearch-api/lib/elasticsearch/api/actions/xpack/usage.rb index d0224f2472..7fb85e9cf6 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/xpack/usage.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/xpack/usage.rb @@ -28,7 +28,7 @@ module Actions # # @option arguments [Time] :master_timeout The period to wait for a connection to the master node. # If no response is received before the timeout expires, the request fails and returns an error. - # To indicate that the request should never timeout, set it to +-1+. Server default: 30s. + # To indicate that the request should never timeout, set it to `-1`. Server default: 30s. # @option arguments [Hash] :headers Custom HTTP headers # # @see https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-xpack From 57940bc5bb8611b3a8fdcb7cf9079159f729e8cf Mon Sep 17 00:00:00 2001 From: Fernando Briano Date: Wed, 7 May 2025 16:32:54 +0100 Subject: [PATCH 3/3] [API] Updates transform.get_node_stats with proper method/no request body --- .../api/actions/transform/get_node_stats.rb | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/transform/get_node_stats.rb b/elasticsearch-api/lib/elasticsearch/api/actions/transform/get_node_stats.rb index 1f480a8eab..b5629cd085 100644 --- a/elasticsearch-api/lib/elasticsearch/api/actions/transform/get_node_stats.rb +++ b/elasticsearch-api/lib/elasticsearch/api/actions/transform/get_node_stats.rb @@ -34,14 +34,9 @@ def get_node_stats(arguments = {}) arguments = arguments.clone headers = arguments.delete(:headers) || {} - body = arguments.delete(:body) - - method = if body - Elasticsearch::API::HTTP_POST - else - Elasticsearch::API::HTTP_GET - end + body = nil + method = Elasticsearch::API::HTTP_GET path = '_transform/_node_stats' params = {}
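For quick reference, a minimal sketch of how the `wait_for_completion` and rethrottle options documented in this patch are used from the Ruby client. The connection URL, index name, and script are illustrative placeholders, not values taken from the patch:

    require 'elasticsearch'

    client = Elasticsearch::Client.new(url: 'http://localhost:9200')

    # Launch the update asynchronously; instead of blocking, the response carries a task ID.
    response = client.update_by_query(
      index: 'my-index',
      wait_for_completion: false,
      body: { script: { source: 'ctx._source.counter = 0' } }
    )
    task_id = response['task']

    # The task record is stored at .tasks/task/${taskId}; poll it to check progress.
    client.tasks.get(task_id: task_id)

    # Drop any throttling on the running task (-1 disables throttling).
    client.update_by_query_rethrottle(task_id: task_id, requests_per_second: -1)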
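A similar sketch for the Watcher endpoints whose comments are updated above; the watch ID, trigger interval, and action name are hypothetical:

    # Register a watch in an inactive state, acknowledge one of its actions, then delete it.
    client.watcher.put_watch(
      id: 'error-alert',
      active: false,
      body: {
        trigger: { schedule: { interval: '10m' } },
        input: { simple: { status: 'ok' } },
        condition: { always: {} },
        actions: { notify_log: { logging: { text: 'watch fired' } } }
      }
    )

    client.watcher.ack_watch(watch_id: 'error-alert', action_id: 'notify_log')
    client.watcher.delete_watch(id: 'error-alert')

    # Only the documented subset of settings on the .watches index can be changed through this API.
    client.watcher.update_settings(body: { 'index.number_of_replicas' => 0 })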
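Finally, the endpoint touched by PATCH 3/3 now issues a bodiless GET, and the xpack info `categories` parameter takes a comma-separated list:

    # GET _transform/_node_stats with no request body, per the change above.
    client.transform.get_node_stats

    # Restrict the info response to selected categories.
    client.xpack.info(categories: 'build,license,features')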