diff --git a/docs/data-sources/elasticsearch_indices.md b/docs/data-sources/elasticsearch_indices.md
new file mode 100644
index 000000000..2ede309bd
--- /dev/null
+++ b/docs/data-sources/elasticsearch_indices.md
@@ -0,0 +1,135 @@
+---
+subcategory: "Index"
+layout: ""
+page_title: "Elasticstack: elasticstack_elasticsearch_indices Data Source"
+description: |-
+ Retrieves indices.
+---
+
+# Data Source: elasticstack_elasticsearch_indices
+
+Use this data source to retrieve information about existing Elasticsearch indices. See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-get-index.html
+
+## Example Usage
+
+```terraform
+provider "elasticstack" {
+ elasticsearch {}
+ kibana {}
+}
+
+data "elasticstack_elasticsearch_indices" "logs" {
+ target = "log*"
+}
+```
+
+
+## Schema
+
+### Optional
+
+- `target` (String) Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (*). To target all data streams and indices, omit this attribute or use * or _all.
+
+### Read-Only
+
+- `id` (String) Generated ID for the indices.
+- `indices` (Attributes List) The list of indices. (see [below for nested schema](#nestedatt--indices))
+
+
+### Nested Schema for `indices`
+
+Required:
+
+- `name` (String) Name of the index.
+
+Optional:
+
+- `alias` (Attributes Set) Aliases for the index. (see [below for nested schema](#nestedatt--indices--alias))
+- `analysis_analyzer` (String) A JSON string describing the analyzers applied to the index.
+- `analysis_char_filter` (String) A JSON string describing the char_filters applied to the index.
+- `analysis_filter` (String) A JSON string describing the filters applied to the index.
+- `analysis_normalizer` (String) A JSON string describing the normalizers applied to the index.
+- `analysis_tokenizer` (String) A JSON string describing the tokenizers applied to the index.
+- `analyze_max_token_count` (Number) The maximum number of tokens that can be produced using _analyze API.
+- `auto_expand_replicas` (String) Set the number of replicas to the node count in the cluster. Set to a dash delimited lower and upper bound (e.g. 0-5) or use all for the upper bound (e.g. 0-all)
+- `blocks_metadata` (Boolean) Set to `true` to disable index metadata reads and writes.
+- `blocks_read` (Boolean) Set to `true` to disable read operations against the index.
+- `blocks_read_only` (Boolean) Set to `true` to make the index and index metadata read only, `false` to allow writes and metadata changes.
+- `blocks_read_only_allow_delete` (Boolean) Identical to `index.blocks.read_only` but allows deleting the index to free up resources.
+- `blocks_write` (Boolean) Set to `true` to disable data write operations against the index. This setting does not affect metadata.
+- `codec` (String) The `default` value compresses stored data with LZ4 compression, but this can be set to `best_compression` which uses DEFLATE for a higher compression ratio. This can be set only on creation.
+- `default_pipeline` (String) The default ingest node pipeline for this index. Index requests will fail if the default pipeline is set and the pipeline does not exist.
+- `deletion_protection` (Boolean) Whether to allow Terraform to destroy the index. Unless this field is set to false in Terraform state, a terraform destroy or terraform apply command that deletes the instance will fail.
+- `final_pipeline` (String) Final ingest pipeline for the index. Indexing requests will fail if the final pipeline is set and the pipeline does not exist. The final pipeline always runs after the request pipeline (if specified) and the default pipeline (if it exists). The special pipeline name _none indicates no ingest pipeline will run.
+- `gc_deletes` (String) The length of time that a deleted document's version number remains available for further versioned operations.
+- `highlight_max_analyzed_offset` (Number) The maximum number of characters that will be analyzed for a highlight request.
+- `include_type_name` (Boolean) If true, a mapping type is expected in the body of mappings. Defaults to false. Supported for Elasticsearch 7.x.
+- `indexing_slowlog_level` (String) Set which logging level to use for the indexing slow log, can be: `warn`, `info`, `debug`, `trace`
+- `indexing_slowlog_source` (String) Set the number of characters of the `_source` to include in the slowlog lines, `false` or `0` will skip logging the source entirely and setting it to `true` will log the entire source regardless of size. The original `_source` is reformatted by default to make sure that it fits on a single log line.
+- `indexing_slowlog_threshold_index_debug` (String) Set the cutoff for shard level slow search logging of slow searches for indexing queries, in time units, e.g. `2s`
+- `indexing_slowlog_threshold_index_info` (String) Set the cutoff for shard level slow search logging of slow searches for indexing queries, in time units, e.g. `5s`
+- `indexing_slowlog_threshold_index_trace` (String) Set the cutoff for shard level slow search logging of slow searches for indexing queries, in time units, e.g. `500ms`
+- `indexing_slowlog_threshold_index_warn` (String) Set the cutoff for shard level slow search logging of slow searches for indexing queries, in time units, e.g. `10s`
+- `load_fixed_bitset_filters_eagerly` (Boolean) Indicates whether cached filters are pre-loaded for nested queries. This can be set only on creation.
+- `mapping_coerce` (Boolean) Set index level coercion setting that is applied to all mapping types.
+- `mappings` (String) Mapping for fields in the index.
+ If specified, this mapping can include: field names, [field data types](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-types.html), [mapping parameters](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-params.html).
+ **NOTE:**
+ - Changing datatypes in the existing _mappings_ will force index to be re-created.
+ - Removing field will be ignored by default same as elasticsearch. You need to recreate the index to remove field completely.
+- `master_timeout` (String) Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. Defaults to `30s`. This value is ignored when running against Serverless projects.
+- `max_docvalue_fields_search` (Number) The maximum number of `docvalue_fields` that are allowed in a query.
+- `max_inner_result_window` (Number) The maximum value of `from + size` for inner hits definition and top hits aggregations to this index.
+- `max_ngram_diff` (Number) The maximum allowed difference between min_gram and max_gram for NGramTokenizer and NGramTokenFilter.
+- `max_refresh_listeners` (Number) Maximum number of refresh listeners available on each shard of the index.
+- `max_regex_length` (Number) The maximum length of regex that can be used in Regexp Query.
+- `max_rescore_window` (Number) The maximum value of `window_size` for `rescore` requests in searches of this index.
+- `max_result_window` (Number) The maximum value of `from + size` for searches to this index.
+- `max_script_fields` (Number) The maximum number of `script_fields` that are allowed in a query.
+- `max_shingle_diff` (Number) The maximum allowed difference between max_shingle_size and min_shingle_size for ShingleTokenFilter.
+- `max_terms_count` (Number) The maximum number of terms that can be used in Terms Query.
+- `number_of_replicas` (Number) Number of shard replicas.
+- `number_of_routing_shards` (Number) Value used with number_of_shards to route documents to a primary shard. This can be set only on creation.
+- `number_of_shards` (Number) Number of shards for the index. This can be set only on creation.
+- `query_default_field` (Set of String) Wildcard (*) patterns matching one or more fields. Defaults to '*', which matches all fields eligible for term-level queries, excluding metadata fields.
+- `refresh_interval` (String) How often to perform a refresh operation, which makes recent changes to the index visible to search. Can be set to `-1` to disable refresh.
+- `routing_allocation_enable` (String) Controls shard allocation for this index. It can be set to: `all` , `primaries` , `new_primaries` , `none`.
+- `routing_partition_size` (Number) The number of shards a custom routing value can go to. This can be set only on creation.
+- `routing_rebalance_enable` (String) Enables shard rebalancing for this index. It can be set to: `all`, `primaries` , `replicas` , `none`.
+- `search_idle_after` (String) How long a shard can not receive a search or get request until it’s considered search idle.
+- `search_slowlog_level` (String) Set which logging level to use for the search slow log, can be: `warn`, `info`, `debug`, `trace`
+- `search_slowlog_threshold_fetch_debug` (String) Set the cutoff for shard level slow search logging of slow searches in the fetch phase, in time units, e.g. `2s`
+- `search_slowlog_threshold_fetch_info` (String) Set the cutoff for shard level slow search logging of slow searches in the fetch phase, in time units, e.g. `5s`
+- `search_slowlog_threshold_fetch_trace` (String) Set the cutoff for shard level slow search logging of slow searches in the fetch phase, in time units, e.g. `500ms`
+- `search_slowlog_threshold_fetch_warn` (String) Set the cutoff for shard level slow search logging of slow searches in the fetch phase, in time units, e.g. `10s`
+- `search_slowlog_threshold_query_debug` (String) Set the cutoff for shard level slow search logging of slow searches in the query phase, in time units, e.g. `2s`
+- `search_slowlog_threshold_query_info` (String) Set the cutoff for shard level slow search logging of slow searches in the query phase, in time units, e.g. `5s`
+- `search_slowlog_threshold_query_trace` (String) Set the cutoff for shard level slow search logging of slow searches in the query phase, in time units, e.g. `500ms`
+- `search_slowlog_threshold_query_warn` (String) Set the cutoff for shard level slow search logging of slow searches in the query phase, in time units, e.g. `10s`
+- `shard_check_on_startup` (String) Whether or not shards should be checked for corruption before opening. When corruption is detected, it will prevent the shard from being opened. Accepts `false`, `true`, `checksum`.
+- `sort_field` (Set of String) The field to sort shards in this index by.
+- `sort_order` (List of String) The direction to sort shards in. Accepts `asc`, `desc`.
+- `timeout` (String) Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. Defaults to `30s`.
+- `unassigned_node_left_delayed_timeout` (String) Time to delay the allocation of replica shards which become unassigned because a node has left, in time units, e.g. `10s`
+- `wait_for_active_shards` (String) The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (number_of_replicas+1). Default: `1`, the primary shard. This value is ignored when running against Serverless projects.
+
+Read-Only:
+
+- `id` (String) Internal identifier of the resource.
+- `settings_raw` (String) All raw settings fetched from the cluster.
+
+
+### Nested Schema for `indices.alias`
+
+Required:
+
+- `name` (String) Index alias name.
+
+Optional:
+
+- `filter` (String) Query used to limit documents the alias can access.
+- `index_routing` (String) Value used to route indexing operations to a specific shard. If specified, this overwrites the `routing` value for indexing operations.
+- `is_hidden` (Boolean) If true, the alias is hidden.
+- `is_write_index` (Boolean) If true, the index is the write index for the alias.
+- `routing` (String) Value used to route indexing and search operations to a specific shard.
+- `search_routing` (String) Value used to route search operations to a specific shard. If specified, this overwrites the routing value for search operations.
diff --git a/examples/data-sources/elasticstack_elasticsearch_indices/data-source.tf b/examples/data-sources/elasticstack_elasticsearch_indices/data-source.tf
new file mode 100644
index 000000000..eb58a34a8
--- /dev/null
+++ b/examples/data-sources/elasticstack_elasticsearch_indices/data-source.tf
@@ -0,0 +1,8 @@
+provider "elasticstack" {
+ elasticsearch {}
+ kibana {}
+}
+
+data "elasticstack_elasticsearch_indices" "logs" {
+ target = "log*"
+}
diff --git a/internal/clients/elasticsearch/index.go b/internal/clients/elasticsearch/index.go
index 4481686c2..024dd4121 100644
--- a/internal/clients/elasticsearch/index.go
+++ b/internal/clients/elasticsearch/index.go
@@ -304,6 +304,16 @@ func DeleteIndex(ctx context.Context, apiClient *clients.ApiClient, name string)
}
func GetIndex(ctx context.Context, apiClient *clients.ApiClient, name string) (*models.Index, fwdiags.Diagnostics) {
+ indices, diags := GetIndices(ctx, apiClient, name)
+ if diags.HasError() {
+ return nil, diags
+ }
+
+ index := indices[name]
+ return &index, diags
+}
+
+func GetIndices(ctx context.Context, apiClient *clients.ApiClient, name string) (map[string]models.Index, fwdiags.Diagnostics) {
esClient, err := apiClient.GetESClient()
if err != nil {
return nil, fwdiags.Diagnostics{
@@ -333,8 +343,8 @@ func GetIndex(ctx context.Context, apiClient *clients.ApiClient, name string) (*
fwdiags.NewErrorDiagnostic(err.Error(), err.Error()),
}
}
- index := indices[name]
- return &index, nil
+
+ return indices, nil
}
func DeleteIndexAlias(ctx context.Context, apiClient *clients.ApiClient, index string, aliases []string) fwdiags.Diagnostics {
diff --git a/internal/elasticsearch/index/indices/data_source.go b/internal/elasticsearch/index/indices/data_source.go
new file mode 100644
index 000000000..a8f8fa3ec
--- /dev/null
+++ b/internal/elasticsearch/index/indices/data_source.go
@@ -0,0 +1,46 @@
+package indices
+
+import (
+ "context"
+
+ "github.com/elastic/terraform-provider-elasticstack/internal/clients"
+ "github.com/hashicorp/terraform-plugin-framework/datasource"
+)
+
+// Ensure the implementation satisfies the expected interfaces.
+var (
+ _ datasource.DataSource = &dataSource{}
+ _ datasource.DataSourceWithConfigure = &dataSource{}
+)
+
+// NewDataSource is a helper function to simplify the provider implementation.
+func NewDataSource() datasource.DataSource {
+ return &dataSource{}
+}
+
+// dataSource is the data source implementation.
+type dataSource struct {
+ client clients.ApiClient
+}
+
+// Metadata returns the data source type name.
+func (d *dataSource) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
+ resp.TypeName = req.ProviderTypeName + "_elasticsearch_indices"
+}
+
+// Configure adds the provider configured client to the data source.
+func (d *dataSource) Configure(_ context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
+ // Add a nil check when handling ProviderData because Terraform
+ // sets that data after it calls the ConfigureProvider RPC.
+ if req.ProviderData == nil {
+ return
+ }
+
+ apiClient, diags := clients.ConvertProviderData(req.ProviderData)
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ d.client = *apiClient
+}
diff --git a/internal/elasticsearch/index/indices/data_source_test.go b/internal/elasticsearch/index/indices/data_source_test.go
new file mode 100644
index 000000000..43614364e
--- /dev/null
+++ b/internal/elasticsearch/index/indices/data_source_test.go
@@ -0,0 +1,36 @@
+package indices_test
+
+import (
+ "testing"
+
+ "github.com/elastic/terraform-provider-elasticstack/internal/acctest"
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
+)
+
+func TestAccIndicesDataSource(t *testing.T) {
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { acctest.PreCheck(t) },
+ ProtoV6ProviderFactories: acctest.Providers,
+ Steps: []resource.TestStep{
+ {
+ Config: testAccIndicesDataSourceConfig,
+ Check: resource.ComposeAggregateTestCheckFunc(
+ resource.TestCheckResourceAttr("data.elasticstack_elasticsearch_indices.security_indices", "indices.0.name", ".security-7"),
+ resource.TestCheckResourceAttr("data.elasticstack_elasticsearch_indices.security_indices", "indices.0.number_of_shards", "1"),
+ resource.TestCheckResourceAttr("data.elasticstack_elasticsearch_indices.security_indices", "indices.0.alias.0.name", ".security"),
+ ),
+ },
+ },
+ })
+}
+
+const testAccIndicesDataSourceConfig = `
+provider "elasticstack" {
+ elasticsearch {}
+ kibana {}
+}
+
+data "elasticstack_elasticsearch_indices" "security_indices" {
+ target = ".security-*"
+}
+`
diff --git a/internal/elasticsearch/index/indices/models.go b/internal/elasticsearch/index/indices/models.go
new file mode 100644
index 000000000..bd735ffdc
--- /dev/null
+++ b/internal/elasticsearch/index/indices/models.go
@@ -0,0 +1,429 @@
+package indices
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "reflect"
+ "strconv"
+
+ "github.com/elastic/terraform-provider-elasticstack/internal/models"
+ "github.com/elastic/terraform-provider-elasticstack/internal/utils"
+ "github.com/elastic/terraform-provider-elasticstack/internal/utils/customtypes"
+ "github.com/hashicorp/terraform-plugin-framework-jsontypes/jsontypes"
+ "github.com/hashicorp/terraform-plugin-framework/attr"
+ "github.com/hashicorp/terraform-plugin-framework/diag"
+ "github.com/hashicorp/terraform-plugin-framework/types"
+ "github.com/hashicorp/terraform-plugin-framework/types/basetypes"
+)
+
+var (
+ staticSettingsKeys = []string{
+ "number_of_shards",
+ "number_of_routing_shards",
+ "codec",
+ "routing_partition_size",
+ "load_fixed_bitset_filters_eagerly",
+ "shard.check_on_startup",
+ "sort.field",
+ "sort.order",
+ "mapping.coerce",
+ }
+ dynamicSettingsKeys = []string{
+ "number_of_replicas",
+ "auto_expand_replicas",
+ "refresh_interval",
+ "search.idle.after",
+ "max_result_window",
+ "max_inner_result_window",
+ "max_rescore_window",
+ "max_docvalue_fields_search",
+ "max_script_fields",
+ "max_ngram_diff",
+ "max_shingle_diff",
+ "blocks.read_only",
+ "blocks.read_only_allow_delete",
+ "blocks.read",
+ "blocks.write",
+ "blocks.metadata",
+ "max_refresh_listeners",
+ "analyze.max_token_count",
+ "highlight.max_analyzed_offset",
+ "max_terms_count",
+ "max_regex_length",
+ "query.default_field",
+ "routing.allocation.enable",
+ "routing.rebalance.enable",
+ "gc_deletes",
+ "default_pipeline",
+ "final_pipeline",
+ "unassigned.node_left.delayed_timeout",
+ "search.slowlog.threshold.query.warn",
+ "search.slowlog.threshold.query.info",
+ "search.slowlog.threshold.query.debug",
+ "search.slowlog.threshold.query.trace",
+ "search.slowlog.threshold.fetch.warn",
+ "search.slowlog.threshold.fetch.info",
+ "search.slowlog.threshold.fetch.debug",
+ "search.slowlog.threshold.fetch.trace",
+ "search.slowlog.level",
+ "indexing.slowlog.threshold.index.warn",
+ "indexing.slowlog.threshold.index.info",
+ "indexing.slowlog.threshold.index.debug",
+ "indexing.slowlog.threshold.index.trace",
+ "indexing.slowlog.level",
+ "indexing.slowlog.source",
+ }
+ allSettingsKeys = []string{}
+)
+
+func init() {
+ allSettingsKeys = append(allSettingsKeys, staticSettingsKeys...)
+ allSettingsKeys = append(allSettingsKeys, dynamicSettingsKeys...)
+}
+
+type tfModel struct {
+ ID types.String `tfsdk:"id"`
+ Target types.String `tfsdk:"target"`
+ Indices types.List `tfsdk:"indices"`
+}
+
+type indexTfModel struct {
+ ID types.String `tfsdk:"id"`
+ Name types.String `tfsdk:"name"`
+ NumberOfShards types.Int64 `tfsdk:"number_of_shards"`
+ NumberOfRoutingShards types.Int64 `tfsdk:"number_of_routing_shards"`
+ Codec types.String `tfsdk:"codec"`
+ RoutingPartitionSize types.Int64 `tfsdk:"routing_partition_size"`
+ LoadFixedBitsetFiltersEagerly types.Bool `tfsdk:"load_fixed_bitset_filters_eagerly"`
+ ShardCheckOnStartup types.String `tfsdk:"shard_check_on_startup"`
+ SortField types.Set `tfsdk:"sort_field"`
+ SortOrder types.List `tfsdk:"sort_order"`
+ MappingCoerce types.Bool `tfsdk:"mapping_coerce"`
+ NumberOfReplicas types.Int64 `tfsdk:"number_of_replicas"`
+ AutoExpandReplicas types.String `tfsdk:"auto_expand_replicas"`
+ SearchIdleAfter types.String `tfsdk:"search_idle_after"`
+ RefreshInterval types.String `tfsdk:"refresh_interval"`
+ MaxResultWindow types.Int64 `tfsdk:"max_result_window"`
+ MaxInnerResultWindow types.Int64 `tfsdk:"max_inner_result_window"`
+ MaxRescoreWindow types.Int64 `tfsdk:"max_rescore_window"`
+ MaxDocvalueFieldsSearch types.Int64 `tfsdk:"max_docvalue_fields_search"`
+ MaxScriptFields types.Int64 `tfsdk:"max_script_fields"`
+ MaxNGramDiff types.Int64 `tfsdk:"max_ngram_diff"`
+ MaxShingleDiff types.Int64 `tfsdk:"max_shingle_diff"`
+ MaxRefreshListeners types.Int64 `tfsdk:"max_refresh_listeners"`
+ AnalyzeMaxTokenCount types.Int64 `tfsdk:"analyze_max_token_count"`
+ HighlightMaxAnalyzedOffset types.Int64 `tfsdk:"highlight_max_analyzed_offset"`
+ MaxTermsCount types.Int64 `tfsdk:"max_terms_count"`
+ MaxRegexLength types.Int64 `tfsdk:"max_regex_length"`
+ QueryDefaultField types.Set `tfsdk:"query_default_field"`
+ RoutingAllocationEnable types.String `tfsdk:"routing_allocation_enable"`
+ RoutingRebalanceEnable types.String `tfsdk:"routing_rebalance_enable"`
+ GCDeletes types.String `tfsdk:"gc_deletes"`
+ BlocksReadOnly types.Bool `tfsdk:"blocks_read_only"`
+ BlocksReadOnlyAllowDelete types.Bool `tfsdk:"blocks_read_only_allow_delete"`
+ BlocksRead types.Bool `tfsdk:"blocks_read"`
+ BlocksWrite types.Bool `tfsdk:"blocks_write"`
+ BlocksMetadata types.Bool `tfsdk:"blocks_metadata"`
+ DefaultPipeline types.String `tfsdk:"default_pipeline"`
+ FinalPipeline types.String `tfsdk:"final_pipeline"`
+ UnassignedNodeLeftDelayedTimeout types.String `tfsdk:"unassigned_node_left_delayed_timeout"`
+ SearchSlowlogThresholdQueryWarn types.String `tfsdk:"search_slowlog_threshold_query_warn"`
+ SearchSlowlogThresholdQueryInfo types.String `tfsdk:"search_slowlog_threshold_query_info"`
+ SearchSlowlogThresholdQueryDebug types.String `tfsdk:"search_slowlog_threshold_query_debug"`
+ SearchSlowlogThresholdQueryTrace types.String `tfsdk:"search_slowlog_threshold_query_trace"`
+ SearchSlowlogThresholdFetchWarn types.String `tfsdk:"search_slowlog_threshold_fetch_warn"`
+ SearchSlowlogThresholdFetchInfo types.String `tfsdk:"search_slowlog_threshold_fetch_info"`
+ SearchSlowlogThresholdFetchDebug types.String `tfsdk:"search_slowlog_threshold_fetch_debug"`
+ SearchSlowlogThresholdFetchTrace types.String `tfsdk:"search_slowlog_threshold_fetch_trace"`
+ SearchSlowlogLevel types.String `tfsdk:"search_slowlog_level"`
+ IndexingSlowlogThresholdIndexWarn types.String `tfsdk:"indexing_slowlog_threshold_index_warn"`
+ IndexingSlowlogThresholdIndexInfo types.String `tfsdk:"indexing_slowlog_threshold_index_info"`
+ IndexingSlowlogThresholdIndexDebug types.String `tfsdk:"indexing_slowlog_threshold_index_debug"`
+ IndexingSlowlogThresholdIndexTrace types.String `tfsdk:"indexing_slowlog_threshold_index_trace"`
+ IndexingSlowlogLevel types.String `tfsdk:"indexing_slowlog_level"`
+ IndexingSlowlogSource types.String `tfsdk:"indexing_slowlog_source"`
+ AnalysisAnalyzer jsontypes.Normalized `tfsdk:"analysis_analyzer"`
+ AnalysisTokenizer jsontypes.Normalized `tfsdk:"analysis_tokenizer"`
+ AnalysisCharFilter jsontypes.Normalized `tfsdk:"analysis_char_filter"`
+ AnalysisFilter jsontypes.Normalized `tfsdk:"analysis_filter"`
+ AnalysisNormalizer jsontypes.Normalized `tfsdk:"analysis_normalizer"`
+ DeletionProtection types.Bool `tfsdk:"deletion_protection"`
+ IncludeTypeName types.Bool `tfsdk:"include_type_name"`
+ WaitForActiveShards types.String `tfsdk:"wait_for_active_shards"`
+ MasterTimeout customtypes.Duration `tfsdk:"master_timeout"`
+ Timeout customtypes.Duration `tfsdk:"timeout"`
+ Mappings jsontypes.Normalized `tfsdk:"mappings"`
+ SettingsRaw jsontypes.Normalized `tfsdk:"settings_raw"`
+ Alias types.Set `tfsdk:"alias"`
+}
+
+type aliasTfModel struct {
+ Name types.String `tfsdk:"name"`
+ Filter jsontypes.Normalized `tfsdk:"filter"`
+ IndexRouting types.String `tfsdk:"index_routing"`
+ IsHidden types.Bool `tfsdk:"is_hidden"`
+ IsWriteIndex types.Bool `tfsdk:"is_write_index"`
+ Routing types.String `tfsdk:"routing"`
+ SearchRouting types.String `tfsdk:"search_routing"`
+}
+
+func (model *indexTfModel) populateFromAPI(ctx context.Context, indexName string, apiModel models.Index) diag.Diagnostics {
+ model.Name = types.StringValue(indexName)
+ model.SortField = types.SetValueMust(types.StringType, []attr.Value{})
+ model.SortOrder = types.ListValueMust(types.StringType, []attr.Value{})
+ model.QueryDefaultField = types.SetValueMust(types.StringType, []attr.Value{})
+
+ modelMappings, diags := mappingsFromAPI(apiModel)
+ if diags.HasError() {
+ return diags
+ }
+ modelAliases, diags := aliasesFromAPI(ctx, apiModel)
+ if diags.HasError() {
+ return diags
+ }
+
+ model.Mappings = modelMappings
+ model.Alias = modelAliases
+
+ diags = setSettingsFromAPI(ctx, model, apiModel)
+ if diags.HasError() {
+ return diags
+ }
+
+ return nil
+}
+
+func mappingsFromAPI(apiModel models.Index) (jsontypes.Normalized, diag.Diagnostics) {
+ if apiModel.Mappings != nil {
+ mappingBytes, err := json.Marshal(apiModel.Mappings)
+ if err != nil {
+ return jsontypes.NewNormalizedNull(), diag.Diagnostics{
+ diag.NewErrorDiagnostic("failed to marshal index mappings", err.Error()),
+ }
+ }
+
+ return jsontypes.NewNormalizedValue(string(mappingBytes)), nil
+ }
+
+ return jsontypes.NewNormalizedNull(), nil
+}
+
+func aliasesFromAPI(ctx context.Context, apiModel models.Index) (basetypes.SetValue, diag.Diagnostics) {
+ aliases := []aliasTfModel{}
+ for name, alias := range apiModel.Aliases {
+ tfAlias, diags := newAliasModelFromAPI(name, alias)
+ if diags.HasError() {
+ return basetypes.SetValue{}, diags
+ }
+
+ aliases = append(aliases, tfAlias)
+ }
+
+ modelAliases, diags := types.SetValueFrom(ctx, aliasElementType(), aliases)
+ if diags.HasError() {
+ return basetypes.SetValue{}, diags
+ }
+
+ return modelAliases, nil
+}
+
+func newAliasModelFromAPI(name string, apiModel models.IndexAlias) (aliasTfModel, diag.Diagnostics) {
+ tfAlias := aliasTfModel{
+ Name: types.StringValue(name),
+ IndexRouting: types.StringValue(apiModel.IndexRouting),
+ IsHidden: types.BoolValue(apiModel.IsHidden),
+ IsWriteIndex: types.BoolValue(apiModel.IsWriteIndex),
+ Routing: types.StringValue(apiModel.Routing),
+ SearchRouting: types.StringValue(apiModel.SearchRouting),
+ }
+
+ if apiModel.Filter != nil {
+ filterBytes, err := json.Marshal(apiModel.Filter)
+ if err != nil {
+ return aliasTfModel{}, diag.Diagnostics{
+ diag.NewErrorDiagnostic("failed to marshal alias filter", err.Error()),
+ }
+ }
+
+ tfAlias.Filter = jsontypes.NewNormalizedValue(string(filterBytes))
+ }
+
+ return tfAlias, nil
+}
+
+func setSettingsFromAPI(ctx context.Context, model *indexTfModel, apiModel models.Index) diag.Diagnostics {
+ modelType := reflect.TypeOf(*model)
+
+ for _, key := range allSettingsKeys {
+ settingsValue, ok := apiModel.Settings["index."+key]
+ var tfValue attr.Value
+ if !ok {
+ continue
+ }
+
+ tfFieldKey := utils.ConvertSettingsKeyToTFFieldKey(key)
+ value, ok := model.getFieldValueByTagValue(tfFieldKey, modelType)
+ if !ok {
+ return diag.Diagnostics{
+ diag.NewErrorDiagnostic(
+ "failed to find setting value",
+ fmt.Sprintf("expected setting with key %s", tfFieldKey),
+ ),
+ }
+ }
+
+ switch a := value.(type) {
+ case types.String:
+ settingStr, ok := settingsValue.(string)
+ if !ok {
+ return diag.Diagnostics{
+ diag.NewErrorDiagnostic(
+ "failed to convert setting to string",
+ fmt.Sprintf("expected setting to be a string but got %T", settingsValue),
+ )}
+ }
+ tfValue = basetypes.NewStringValue(settingStr)
+ case types.Bool:
+ settingBool, ok := settingsValue.(bool)
+ if !ok {
+ return diag.Diagnostics{
+ diag.NewErrorDiagnostic(
+ "failed to convert setting to bool",
+ fmt.Sprintf("expected setting to be a bool but got %T", settingsValue),
+ )}
+ }
+ tfValue = basetypes.NewBoolValue(settingBool)
+ case types.Int64:
+ if settingStr, ok := settingsValue.(string); ok {
+ settingInt, err := strconv.Atoi(settingStr)
+ if err != nil {
+ return diag.Diagnostics{
+ diag.NewErrorDiagnostic(
+ "failed to convert setting to int",
+ fmt.Sprintf("expected setting to be an int but it was a string. Attempted to parse it but got %s", err.Error()),
+ ),
+ }
+ }
+
+ settingsValue = int64(settingInt)
+ }
+
+ settingInt, ok := settingsValue.(int64)
+ if !ok {
+ return diag.Diagnostics{
+ diag.NewErrorDiagnostic(
+ "failed to convert setting to int",
+ fmt.Sprintf("expected setting to be an int but got %T", settingsValue),
+ )}
+ }
+ tfValue = basetypes.NewInt64Value(settingInt)
+ case types.List:
+ elemType := a.ElementType(ctx)
+ if elemType != types.StringType {
+ return diag.Diagnostics{
+ diag.NewErrorDiagnostic(
+ "expected list of string",
+ fmt.Sprintf("expected list element type to be string but got %s", elemType),
+ ),
+ }
+ }
+
+ elems, ok := settingsValue.([]interface{})
+ if !ok {
+ return diag.Diagnostics{
+ diag.NewErrorDiagnostic(
+ "failed to convert setting to []string",
+ fmt.Sprintf("expected setting to be a []string but got %#v", settingsValue),
+ )}
+ }
+
+ var diags diag.Diagnostics
+ tfValue, diags = basetypes.NewListValueFrom(ctx, basetypes.StringType{}, elems)
+ if diags.HasError() {
+ return diags
+ }
+ case types.Set:
+ elemType := a.ElementType(ctx)
+ if elemType != types.StringType {
+ return diag.Diagnostics{
+ diag.NewErrorDiagnostic(
+ "expected set of string",
+ fmt.Sprintf("expected set element type to be string but got %s", elemType),
+ ),
+ }
+ }
+
+ elems, ok := settingsValue.([]interface{})
+ if !ok {
+ return diag.Diagnostics{
+ diag.NewErrorDiagnostic(
+ "failed to convert setting to []string",
+ fmt.Sprintf("expected setting to be a []string but got %#v", settingsValue),
+ )}
+ }
+
+ var diags diag.Diagnostics
+ tfValue, diags = basetypes.NewSetValueFrom(ctx, basetypes.StringType{}, elems)
+ if diags.HasError() {
+ return diags
+ }
+ default:
+ return diag.Diagnostics{
+ diag.NewErrorDiagnostic(
+ "unknown value type",
+ fmt.Sprintf("unknown index setting value type %s", a.Type(ctx)),
+ ),
+ }
+ }
+
+ ok = model.setFieldValueByTagValue(tfFieldKey, modelType, tfValue)
+ if !ok {
+ return diag.Diagnostics{
+ diag.NewErrorDiagnostic(
+ "failed to find setting value",
+ fmt.Sprintf("expected setting with key %s", tfFieldKey),
+ ),
+ }
+ }
+ }
+
+ settingsBytes, err := json.Marshal(apiModel.Settings)
+ if err != nil {
+ return diag.Diagnostics{
+ diag.NewErrorDiagnostic(
+ "failed to marshal raw settings",
+ err.Error(),
+ ),
+ }
+ }
+
+ model.SettingsRaw = jsontypes.NewNormalizedValue(string(settingsBytes))
+
+ return nil
+}
+
+func (model indexTfModel) getFieldValueByTagValue(tagName string, t reflect.Type) (attr.Value, bool) {
+ numField := t.NumField()
+ for i := 0; i < numField; i++ {
+ field := t.Field(i)
+ if field.Tag.Get("tfsdk") == tagName {
+ return reflect.ValueOf(model).Field(i).Interface().(attr.Value), true
+ }
+ }
+
+ return nil, false
+}
+
+func (model *indexTfModel) setFieldValueByTagValue(tagName string, t reflect.Type, value attr.Value) bool {
+ numField := t.NumField()
+ for i := 0; i < numField; i++ {
+ field := t.Field(i)
+ if field.Tag.Get("tfsdk") == tagName {
+ reflect.ValueOf(model).Elem().Field(i).Set(reflect.ValueOf(value))
+ return true
+ }
+ }
+
+ return false
+}
diff --git a/internal/elasticsearch/index/indices/read.go b/internal/elasticsearch/index/indices/read.go
new file mode 100644
index 000000000..d32b1240b
--- /dev/null
+++ b/internal/elasticsearch/index/indices/read.go
@@ -0,0 +1,56 @@
+package indices
+
+import (
+	"context"
+	"sort"
+
+	"github.com/elastic/terraform-provider-elasticstack/internal/clients/elasticsearch"
+	"github.com/hashicorp/terraform-plugin-framework/datasource"
+	"github.com/hashicorp/terraform-plugin-framework/path"
+	"github.com/hashicorp/terraform-plugin-framework/types"
+)
+
+// Read refreshes the Terraform state with the latest data. It resolves the
+// optional `target` attribute, fetches the matching indices from the cluster,
+// and maps each API index into the framework model.
+func (d *dataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
+	var stateModel tfModel
+
+	// Resolve target attribute
+	var target string
+	diags := req.Config.GetAttribute(ctx, path.Root("target"), &target)
+	resp.Diagnostics.Append(diags...)
+	if resp.Diagnostics.HasError() {
+		return
+	}
+
+	// Call client API
+	indexApiModels, diags := elasticsearch.GetIndices(ctx, &d.client, target)
+	resp.Diagnostics.Append(diags...)
+	if resp.Diagnostics.HasError() {
+		return
+	}
+
+	// Go map iteration order is randomized; iterating the response map
+	// directly would reorder the indices list between runs and cause
+	// spurious state diffs. Sort the index names for a deterministic result.
+	indexNames := make([]string, 0, len(indexApiModels))
+	for indexName := range indexApiModels {
+		indexNames = append(indexNames, indexName)
+	}
+	sort.Strings(indexNames)
+
+	// Map response body to model
+	indices := make([]indexTfModel, 0, len(indexNames))
+	for _, indexName := range indexNames {
+		indexStateModel := indexTfModel{}
+
+		diags := indexStateModel.populateFromAPI(ctx, indexName, indexApiModels[indexName])
+		resp.Diagnostics.Append(diags...)
+		if resp.Diagnostics.HasError() {
+			return
+		}
+
+		indices = append(indices, indexStateModel)
+	}
+
+	indicesList, diags := types.ListValueFrom(ctx, indicesElementType(), indices)
+	resp.Diagnostics.Append(diags...)
+	if resp.Diagnostics.HasError() {
+		return
+	}
+
+	// The data source ID is simply the requested target expression.
+	stateModel.ID = types.StringValue(target)
+	stateModel.Indices = indicesList
+
+	// Set state
+	resp.Diagnostics.Append(resp.State.Set(ctx, stateModel)...)
+}
diff --git a/internal/elasticsearch/index/indices/schema.go b/internal/elasticsearch/index/indices/schema.go
new file mode 100644
index 000000000..a5f6bd85c
--- /dev/null
+++ b/internal/elasticsearch/index/indices/schema.go
@@ -0,0 +1,434 @@
+package indices
+
+import (
+ "context"
+ "regexp"
+
+ "github.com/elastic/terraform-provider-elasticstack/internal/elasticsearch/index"
+ "github.com/elastic/terraform-provider-elasticstack/internal/utils/customtypes"
+ "github.com/hashicorp/terraform-plugin-framework-jsontypes/jsontypes"
+ "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator"
+ "github.com/hashicorp/terraform-plugin-framework/attr"
+ "github.com/hashicorp/terraform-plugin-framework/datasource"
+ "github.com/hashicorp/terraform-plugin-framework/datasource/schema"
+ "github.com/hashicorp/terraform-plugin-framework/schema/validator"
+ "github.com/hashicorp/terraform-plugin-framework/types"
+)
+
+// Schema sets the data source schema. The definition lives in getSchema so
+// that indicesElementType and aliasElementType can derive element types from
+// the same source of truth.
+func (d *dataSource) Schema(_ context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
+	resp.Schema = getSchema()
+}
+
+// getSchema returns the full schema of the elasticstack_elasticsearch_indices
+// data source. It is a standalone function (rather than being inlined in
+// Schema) so indicesElementType and aliasElementType can derive attr.Types
+// from the same definition.
+//
+// NOTE(review): several nested attributes below are declared Required/Optional
+// with validators even though this is a read-only data source whose values are
+// populated from the cluster; confirm whether they should all be Computed.
+func getSchema() schema.Schema {
+	return schema.Schema{
+		// This is a data source, so the description says "Retrieves" (the
+		// previous "Manages" wording belongs to the index resource).
+		Description: "Retrieves Elasticsearch indices. See: https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-get-index.html",
+		Attributes: map[string]schema.Attribute{
+			"id": schema.StringAttribute{
+				Description: "Generated ID for the indices.",
+				Computed:    true,
+			},
+			"target": schema.StringAttribute{
+				Description: "Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (*). To target all data streams and indices, omit this attribute or use * or _all.",
+				Optional:    true,
+			},
+			"indices": schema.ListNestedAttribute{
+				Description: "The list of indices.",
+				Computed:    true,
+				NestedObject: schema.NestedAttributeObject{
+					Attributes: map[string]schema.Attribute{
+						"id": schema.StringAttribute{
+							Description: "Internal identifier of the resource.",
+							Computed:    true,
+						},
+						"name": schema.StringAttribute{
+							Description: "Name of the index.",
+							Required:    true,
+							Validators: []validator.String{
+								stringvalidator.LengthBetween(1, 255),
+								stringvalidator.NoneOf(".", ".."),
+								stringvalidator.RegexMatches(regexp.MustCompile(`^[^-_+]`), "cannot start with -, _, +"),
+								stringvalidator.RegexMatches(regexp.MustCompile(`^[a-z0-9!$%&'()+.;=@[\]^{}~_-]+$`), "must contain lower case alphanumeric characters and selected punctuation, see: https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-create-index.html#indices-create-api-path-params"),
+							},
+						},
+						// Static settings that can only be set on creation
+						"number_of_shards": schema.Int64Attribute{
+							Description: "Number of shards for the index. This can be set only on creation.",
+							Optional:    true,
+						},
+						"number_of_routing_shards": schema.Int64Attribute{
+							Description: "Value used with number_of_shards to route documents to a primary shard. This can be set only on creation.",
+							Optional:    true,
+						},
+						"codec": schema.StringAttribute{
+							Description: "The `default` value compresses stored data with LZ4 compression, but this can be set to `best_compression` which uses DEFLATE for a higher compression ratio. This can be set only on creation.",
+							Optional:    true,
+							// NOTE(review): the validator rejects the literal
+							// value "default" even though the description
+							// mentions it; confirm whether "default" should be
+							// an accepted explicit value.
+							Validators: []validator.String{
+								stringvalidator.OneOf("best_compression"),
+							},
+						},
+						"routing_partition_size": schema.Int64Attribute{
+							Description: "The number of shards a custom routing value can go to. This can be set only on creation.",
+							Optional:    true,
+						},
+						"load_fixed_bitset_filters_eagerly": schema.BoolAttribute{
+							Description: "Indicates whether cached filters are pre-loaded for nested queries. This can be set only on creation.",
+							Optional:    true,
+						},
+						"shard_check_on_startup": schema.StringAttribute{
+							Description: "Whether or not shards should be checked for corruption before opening. When corruption is detected, it will prevent the shard from being opened. Accepts `false`, `true`, `checksum`.",
+							Optional:    true,
+							Validators: []validator.String{
+								stringvalidator.OneOf("false", "true", "checksum"),
+							},
+						},
+						"sort_field": schema.SetAttribute{
+							ElementType: types.StringType,
+							Description: "The field to sort shards in this index by.",
+							Optional:    true,
+						},
+						// sort_order can't be set type since it can have dup strings like ["asc", "asc"]
+						"sort_order": schema.ListAttribute{
+							ElementType: types.StringType,
+							Description: "The direction to sort shards in. Accepts `asc`, `desc`.",
+							Optional:    true,
+						},
+						"mapping_coerce": schema.BoolAttribute{
+							Description: "Set index level coercion setting that is applied to all mapping types.",
+							Optional:    true,
+						},
+						// Dynamic settings that can be changed at runtime
+						"number_of_replicas": schema.Int64Attribute{
+							Description: "Number of shard replicas.",
+							Optional:    true,
+							Computed:    true,
+						},
+						"auto_expand_replicas": schema.StringAttribute{
+							Description: "Set the number of replicas to the node count in the cluster. Set to a dash delimited lower and upper bound (e.g. 0-5) or use all for the upper bound (e.g. 0-all)",
+							Optional:    true,
+						},
+						"search_idle_after": schema.StringAttribute{
+							Description: "How long a shard can not receive a search or get request until it’s considered search idle.",
+							Optional:    true,
+						},
+						"refresh_interval": schema.StringAttribute{
+							Description: "How often to perform a refresh operation, which makes recent changes to the index visible to search. Can be set to `-1` to disable refresh.",
+							Optional:    true,
+						},
+						"max_result_window": schema.Int64Attribute{
+							Description: "The maximum value of `from + size` for searches to this index.",
+							Optional:    true,
+						},
+						"max_inner_result_window": schema.Int64Attribute{
+							Description: "The maximum value of `from + size` for inner hits definition and top hits aggregations to this index.",
+							Optional:    true,
+						},
+						"max_rescore_window": schema.Int64Attribute{
+							Description: "The maximum value of `window_size` for `rescore` requests in searches of this index.",
+							Optional:    true,
+						},
+						"max_docvalue_fields_search": schema.Int64Attribute{
+							Description: "The maximum number of `docvalue_fields` that are allowed in a query.",
+							Optional:    true,
+						},
+						"max_script_fields": schema.Int64Attribute{
+							Description: "The maximum number of `script_fields` that are allowed in a query.",
+							Optional:    true,
+						},
+						"max_ngram_diff": schema.Int64Attribute{
+							Description: "The maximum allowed difference between min_gram and max_gram for NGramTokenizer and NGramTokenFilter.",
+							Optional:    true,
+						},
+						"max_shingle_diff": schema.Int64Attribute{
+							Description: "The maximum allowed difference between max_shingle_size and min_shingle_size for ShingleTokenFilter.",
+							Optional:    true,
+						},
+						"max_refresh_listeners": schema.Int64Attribute{
+							Description: "Maximum number of refresh listeners available on each shard of the index.",
+							Optional:    true,
+						},
+						"analyze_max_token_count": schema.Int64Attribute{
+							Description: "The maximum number of tokens that can be produced using _analyze API.",
+							Optional:    true,
+						},
+						"highlight_max_analyzed_offset": schema.Int64Attribute{
+							Description: "The maximum number of characters that will be analyzed for a highlight request.",
+							Optional:    true,
+						},
+						"max_terms_count": schema.Int64Attribute{
+							Description: "The maximum number of terms that can be used in Terms Query.",
+							Optional:    true,
+						},
+						"max_regex_length": schema.Int64Attribute{
+							Description: "The maximum length of regex that can be used in Regexp Query.",
+							Optional:    true,
+						},
+						"query_default_field": schema.SetAttribute{
+							ElementType: types.StringType,
+							Description: "Wildcard (*) patterns matching one or more fields. Defaults to '*', which matches all fields eligible for term-level queries, excluding metadata fields.",
+							Optional:    true,
+						},
+						"routing_allocation_enable": schema.StringAttribute{
+							Description: "Controls shard allocation for this index. It can be set to: `all` , `primaries` , `new_primaries` , `none`.",
+							Optional:    true,
+							Validators: []validator.String{
+								stringvalidator.OneOf("all", "primaries", "new_primaries", "none"),
+							},
+						},
+						"routing_rebalance_enable": schema.StringAttribute{
+							Description: "Enables shard rebalancing for this index. It can be set to: `all`, `primaries` , `replicas` , `none`.",
+							Optional:    true,
+							Validators: []validator.String{
+								stringvalidator.OneOf("all", "primaries", "replicas", "none"),
+							},
+						},
+						"gc_deletes": schema.StringAttribute{
+							Description: "The length of time that a deleted document's version number remains available for further versioned operations.",
+							Optional:    true,
+						},
+						"blocks_read_only": schema.BoolAttribute{
+							Description: "Set to `true` to make the index and index metadata read only, `false` to allow writes and metadata changes.",
+							Optional:    true,
+						},
+						"blocks_read_only_allow_delete": schema.BoolAttribute{
+							Description: "Identical to `index.blocks.read_only` but allows deleting the index to free up resources.",
+							Optional:    true,
+						},
+						"blocks_read": schema.BoolAttribute{
+							Description: "Set to `true` to disable read operations against the index.",
+							Optional:    true,
+						},
+						"blocks_write": schema.BoolAttribute{
+							Description: "Set to `true` to disable data write operations against the index. This setting does not affect metadata.",
+							Optional:    true,
+						},
+						"blocks_metadata": schema.BoolAttribute{
+							Description: "Set to `true` to disable index metadata reads and writes.",
+							Optional:    true,
+						},
+						"default_pipeline": schema.StringAttribute{
+							Description: "The default ingest node pipeline for this index. Index requests will fail if the default pipeline is set and the pipeline does not exist.",
+							Optional:    true,
+						},
+						"final_pipeline": schema.StringAttribute{
+							Description: "Final ingest pipeline for the index. Indexing requests will fail if the final pipeline is set and the pipeline does not exist. The final pipeline always runs after the request pipeline (if specified) and the default pipeline (if it exists). The special pipeline name _none indicates no ingest pipeline will run.",
+							Optional:    true,
+						},
+						"unassigned_node_left_delayed_timeout": schema.StringAttribute{
+							Description: "Time to delay the allocation of replica shards which become unassigned because a node has left, in time units, e.g. `10s`",
+							Optional:    true,
+						},
+						"search_slowlog_threshold_query_warn": schema.StringAttribute{
+							Description: "Set the cutoff for shard level slow search logging of slow searches in the query phase, in time units, e.g. `10s`",
+							Optional:    true,
+						},
+						"search_slowlog_threshold_query_info": schema.StringAttribute{
+							Description: "Set the cutoff for shard level slow search logging of slow searches in the query phase, in time units, e.g. `5s`",
+							Optional:    true,
+						},
+						"search_slowlog_threshold_query_debug": schema.StringAttribute{
+							Description: "Set the cutoff for shard level slow search logging of slow searches in the query phase, in time units, e.g. `2s`",
+							Optional:    true,
+						},
+						"search_slowlog_threshold_query_trace": schema.StringAttribute{
+							Description: "Set the cutoff for shard level slow search logging of slow searches in the query phase, in time units, e.g. `500ms`",
+							Optional:    true,
+						},
+						"search_slowlog_threshold_fetch_warn": schema.StringAttribute{
+							Description: "Set the cutoff for shard level slow search logging of slow searches in the fetch phase, in time units, e.g. `10s`",
+							Optional:    true,
+						},
+						"search_slowlog_threshold_fetch_info": schema.StringAttribute{
+							Description: "Set the cutoff for shard level slow search logging of slow searches in the fetch phase, in time units, e.g. `5s`",
+							Optional:    true,
+						},
+						"search_slowlog_threshold_fetch_debug": schema.StringAttribute{
+							Description: "Set the cutoff for shard level slow search logging of slow searches in the fetch phase, in time units, e.g. `2s`",
+							Optional:    true,
+						},
+						"search_slowlog_threshold_fetch_trace": schema.StringAttribute{
+							Description: "Set the cutoff for shard level slow search logging of slow searches in the fetch phase, in time units, e.g. `500ms`",
+							Optional:    true,
+						},
+						"search_slowlog_level": schema.StringAttribute{
+							Description: "Set which logging level to use for the search slow log, can be: `warn`, `info`, `debug`, `trace`",
+							Optional:    true,
+							Validators: []validator.String{
+								stringvalidator.OneOf("warn", "info", "debug", "trace"),
+							},
+						},
+						"indexing_slowlog_threshold_index_warn": schema.StringAttribute{
+							Description: "Set the cutoff for shard level slow search logging of slow searches for indexing queries, in time units, e.g. `10s`",
+							Optional:    true,
+						},
+						"indexing_slowlog_threshold_index_info": schema.StringAttribute{
+							Description: "Set the cutoff for shard level slow search logging of slow searches for indexing queries, in time units, e.g. `5s`",
+							Optional:    true,
+						},
+						"indexing_slowlog_threshold_index_debug": schema.StringAttribute{
+							Description: "Set the cutoff for shard level slow search logging of slow searches for indexing queries, in time units, e.g. `2s`",
+							Optional:    true,
+						},
+						"indexing_slowlog_threshold_index_trace": schema.StringAttribute{
+							Description: "Set the cutoff for shard level slow search logging of slow searches for indexing queries, in time units, e.g. `500ms`",
+							Optional:    true,
+						},
+						"indexing_slowlog_level": schema.StringAttribute{
+							Description: "Set which logging level to use for the search slow log, can be: `warn`, `info`, `debug`, `trace`",
+							Optional:    true,
+							Validators: []validator.String{
+								stringvalidator.OneOf("warn", "info", "debug", "trace"),
+							},
+						},
+						"indexing_slowlog_source": schema.StringAttribute{
+							Description: "Set the number of characters of the `_source` to include in the slowlog lines, `false` or `0` will skip logging the source entirely and setting it to `true` will log the entire source regardless of size. The original `_source` is reformatted by default to make sure that it fits on a single log line.",
+							Optional:    true,
+						},
+						// To change analyzer setting, the index must be closed, updated, and then reopened but it can't be handled in terraform.
+						// We raise error when they are tried to be updated instead of setting ForceNew not to have unexpected deletion.
+						"analysis_analyzer": schema.StringAttribute{
+							Description: "A JSON string describing the analyzers applied to the index.",
+							Optional:    true,
+							CustomType:  jsontypes.NormalizedType{},
+							Validators: []validator.String{
+								index.StringIsJSONObject{},
+							},
+						},
+						"analysis_tokenizer": schema.StringAttribute{
+							Description: "A JSON string describing the tokenizers applied to the index.",
+							Optional:    true,
+							CustomType:  jsontypes.NormalizedType{},
+							Validators: []validator.String{
+								index.StringIsJSONObject{},
+							},
+						},
+						"analysis_char_filter": schema.StringAttribute{
+							Description: "A JSON string describing the char_filters applied to the index.",
+							Optional:    true,
+							CustomType:  jsontypes.NormalizedType{},
+							Validators: []validator.String{
+								index.StringIsJSONObject{},
+							},
+						},
+						"analysis_filter": schema.StringAttribute{
+							Description: "A JSON string describing the filters applied to the index.",
+							Optional:    true,
+							CustomType:  jsontypes.NormalizedType{},
+							Validators: []validator.String{
+								index.StringIsJSONObject{},
+							},
+						},
+						"analysis_normalizer": schema.StringAttribute{
+							Description: "A JSON string describing the normalizers applied to the index.",
+							Optional:    true,
+							CustomType:  jsontypes.NormalizedType{},
+							Validators: []validator.String{
+								index.StringIsJSONObject{},
+							},
+						},
+						"deletion_protection": schema.BoolAttribute{
+							Optional:    true,
+							Computed:    true,
+							Description: "Whether to allow Terraform to destroy the index. Unless this field is set to false in Terraform state, a terraform destroy or terraform apply command that deletes the instance will fail.",
+						},
+						"include_type_name": schema.BoolAttribute{
+							Description: "If true, a mapping type is expected in the body of mappings. Defaults to false. Supported for Elasticsearch 7.x.",
+							Optional:    true,
+							Computed:    true,
+						},
+						"wait_for_active_shards": schema.StringAttribute{
+							Description: "The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (number_of_replicas+1). Default: `1`, the primary shard. This value is ignored when running against Serverless projects.",
+							Optional:    true,
+							Computed:    true,
+						},
+						"master_timeout": schema.StringAttribute{
+							Description: "Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. Defaults to `30s`. This value is ignored when running against Serverless projects.",
+							Optional:    true,
+							Computed:    true,
+							CustomType:  customtypes.DurationType{},
+						},
+						"timeout": schema.StringAttribute{
+							Description: "Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. Defaults to `30s`.",
+							Optional:    true,
+							Computed:    true,
+							CustomType:  customtypes.DurationType{},
+						},
+						"mappings": schema.StringAttribute{
+							Description: `Mapping for fields in the index.
+							If specified, this mapping can include: field names, [field data types](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-types.html), [mapping parameters](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-params.html).
+							**NOTE:**
+							- Changing datatypes in the existing _mappings_ will force index to be re-created.
+							- Removing field will be ignored by default same as elasticsearch. You need to recreate the index to remove field completely.
+							`,
+							Optional:   true,
+							Computed:   true,
+							CustomType: jsontypes.NormalizedType{},
+							Validators: []validator.String{
+								index.StringIsJSONObject{},
+							},
+						},
+						"settings_raw": schema.StringAttribute{
+							Description: "All raw settings fetched from the cluster.",
+							Computed:    true,
+							CustomType:  jsontypes.NormalizedType{},
+						},
+						// "settings": getSettingsSchema(),
+						"alias": getAliasSchema(),
+					},
+				},
+			},
+		},
+	}
+}
+
+// getAliasSchema returns the nested schema for the `alias` attribute of an
+// index. It is a standalone function so aliasElementType can derive the
+// element attr.Type from the same definition.
+func getAliasSchema() schema.SetNestedAttribute {
+	return schema.SetNestedAttribute{
+		Description: "Aliases for the index.",
+		Optional:    true,
+		NestedObject: schema.NestedAttributeObject{
+			Attributes: map[string]schema.Attribute{
+				"name": schema.StringAttribute{
+					Description: "Index alias name.",
+					Required:    true,
+				},
+				// filter is JSON-normalized so semantically-equal documents
+				// with different formatting do not produce diffs.
+				"filter": schema.StringAttribute{
+					Description: "Query used to limit documents the alias can access.",
+					Optional:    true,
+					CustomType:  jsontypes.NormalizedType{},
+				},
+				"index_routing": schema.StringAttribute{
+					Description: "Value used to route indexing operations to a specific shard. If specified, this overwrites the `routing` value for indexing operations.",
+					Optional:    true,
+					Computed:    true,
+				},
+				"is_hidden": schema.BoolAttribute{
+					Description: "If true, the alias is hidden.",
+					Optional:    true,
+					Computed:    true,
+				},
+				"is_write_index": schema.BoolAttribute{
+					Description: "If true, the index is the write index for the alias.",
+					Optional:    true,
+					Computed:    true,
+				},
+				"routing": schema.StringAttribute{
+					Description: "Value used to route indexing and search operations to a specific shard.",
+					Optional:    true,
+					Computed:    true,
+				},
+				"search_routing": schema.StringAttribute{
+					Description: "Value used to route search operations to a specific shard. If specified, this overwrites the routing value for search operations.",
+					Optional:    true,
+					Computed:    true,
+				},
+			},
+		},
+	}
+}
+
+// indicesElementType returns the element type of the top-level "indices"
+// list attribute, derived from the data source schema definition.
+func indicesElementType() attr.Type {
+	indicesType := getSchema().Attributes["indices"].GetType()
+	return indicesType.(attr.TypeWithElementType).ElementType()
+}
+
+// aliasElementType returns the element type of the nested "alias" set
+// attribute, derived from the alias schema definition.
+func aliasElementType() attr.Type {
+	aliasType := getAliasSchema().GetType()
+	return aliasType.(attr.TypeWithElementType).ElementType()
+}
diff --git a/internal/utils/utils.go b/internal/utils/utils.go
index 9801df9ea..9209ecd19 100644
--- a/internal/utils/utils.go
+++ b/internal/utils/utils.go
@@ -19,6 +19,20 @@ import (
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)
+// ConvertSDKDiagnosticsToFramework converts terraform-plugin-sdk diagnostics
+// into their terraform-plugin-framework equivalents so helpers written for
+// the SDK can be reused from framework-based resources and data sources.
+// Error-severity diagnostics become framework errors; any other severity is
+// mapped to a warning.
+func ConvertSDKDiagnosticsToFramework(sdkDiags sdkdiag.Diagnostics) fwdiag.Diagnostics {
+	var fwDiags fwdiag.Diagnostics
+
+	for _, sdkDiag := range sdkDiags {
+		if sdkDiag.Severity == sdkdiag.Error {
+			fwDiags.AddError(sdkDiag.Summary, sdkDiag.Detail)
+		} else {
+			fwDiags.AddWarning(sdkDiag.Summary, sdkDiag.Detail)
+		}
+	}
+
+	return fwDiags
+}
+
func CheckError(res *esapi.Response, errMsg string) sdkdiag.Diagnostics {
var diags sdkdiag.Diagnostics
diff --git a/provider/plugin_framework.go b/provider/plugin_framework.go
index 2d15aa0b2..6f34f912d 100644
--- a/provider/plugin_framework.go
+++ b/provider/plugin_framework.go
@@ -6,6 +6,7 @@ import (
"github.com/elastic/terraform-provider-elasticstack/internal/clients"
"github.com/elastic/terraform-provider-elasticstack/internal/clients/config"
"github.com/elastic/terraform-provider-elasticstack/internal/elasticsearch/index/index"
+ "github.com/elastic/terraform-provider-elasticstack/internal/elasticsearch/index/indices"
"github.com/elastic/terraform-provider-elasticstack/internal/kibana/data_view"
"github.com/elastic/terraform-provider-elasticstack/internal/kibana/import_saved_objects"
"github.com/elastic/terraform-provider-elasticstack/internal/kibana/spaces"
@@ -69,6 +70,7 @@ func (p *Provider) Configure(ctx context.Context, req fwprovider.ConfigureReques
func (p *Provider) DataSources(ctx context.Context) []func() datasource.DataSource {
return []func() datasource.DataSource{
+ indices.NewDataSource,
spaces.NewDataSource,
}
}
diff --git a/templates/data-sources/elasticsearch_indices.md.tmpl b/templates/data-sources/elasticsearch_indices.md.tmpl
new file mode 100644
index 000000000..33075a2f0
--- /dev/null
+++ b/templates/data-sources/elasticsearch_indices.md.tmpl
@@ -0,0 +1,17 @@
+---
+subcategory: "Index"
+layout: ""
+page_title: "Elasticstack: elasticstack_elasticsearch_indices Data Source"
+description: |-
+ Retrieves indices.
+---
+
+# Data Source: elasticstack_elasticsearch_indices
+
+Use this data source to retrieve information about existing Elasticsearch indices. See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-get-index.html
+
+## Example Usage
+
+{{ tffile "examples/data-sources/elasticstack_elasticsearch_indices/data-source.tf" }}
+
+{{ .SchemaMarkdown | trimspace }}