Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
## [Unreleased]

- Fix secret handling `elasticstack_fleet_integration_policy` resource. ([#821](https://github.com/elastic/terraform-provider-elasticstack/pull/821))
- Add `aliases` attribute to `elasticstack_elasticsearch_transform` resource. ([#825](https://github.com/elastic/terraform-provider-elasticstack/pull/825))
- Add `description` attribute to `elasticstack_elasticsearch_security_role` resource. ([#824](https://github.com/elastic/terraform-provider-elasticstack/pull/824))
- Fix merge values for `elasticstack_kibana_synthetics_monitor` monitor locations ([#823](https://github.com/elastic/terraform-provider-elasticstack/pull/823))
- Add `elasticstack_elasticsearch_index_template` data source ([#828](https://github.com/elastic/terraform-provider-elasticstack/pull/828))
Expand Down
27 changes: 25 additions & 2 deletions docs/resources/elasticsearch_transform.md
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,16 @@ resource "elasticstack_elasticsearch_transform" "transform_with_pivot" {

destination {
index = "destination_index_for_transform"

aliases {
alias = "test_alias_1"
move_on_creation = true
}

aliases {
alias = "test_alias_2"
move_on_creation = false
}
}

pivot = jsonencode({
Expand Down Expand Up @@ -85,7 +95,7 @@ resource "elasticstack_elasticsearch_transform" "transform_with_pivot" {
- `defer_validation` (Boolean) When true, deferrable validations are not run upon creation, but rather when the transform is started. This behavior may be desired if the source index does not exist until after the transform is created. Default is `false`
- `description` (String) Free text description of the transform.
- `docs_per_second` (Number) Specifies a limit on the number of input documents per second. Default (unset) value disables throttling.
- `enabled` (Boolean) Controls wether the transform should be started or stopped. Default is `false` (stopped).
- `enabled` (Boolean) Controls whether the transform should be started or stopped. Default is `false` (stopped).
- `frequency` (String) The interval between checks for changes in the source indices when the transform is running continuously. Defaults to `1m`.
- `latest` (String) The latest method transforms the data by finding the latest document for each unique key. JSON definition expected. Either 'pivot' or 'latest' must be present.
- `max_page_search_size` (Number) Defines the initial page size to use for the composite aggregation for each checkpoint. Default is 500.
Expand All @@ -94,7 +104,7 @@ resource "elasticstack_elasticsearch_transform" "transform_with_pivot" {
- `pivot` (String) The pivot method transforms the data by aggregating and grouping it. JSON definition expected. Either 'pivot' or 'latest' must be present.
- `retention_policy` (Block List, Max: 1) Defines a retention policy for the transform. (see [below for nested schema](#nestedblock--retention_policy))
- `sync` (Block List, Max: 1) Defines the properties transforms require to run continuously. (see [below for nested schema](#nestedblock--sync))
- `timeout` (String) Period to wait for a response from Elastisearch when performing any management operation. If no response is received before the timeout expires, the operation fails and returns an error. Defaults to `30s`.
- `timeout` (String) Period to wait for a response from Elasticsearch when performing any management operation. If no response is received before the timeout expires, the operation fails and returns an error. Defaults to `30s`.
- `unattended` (Boolean) In unattended mode, the transform retries indefinitely in case of an error which means the transform never fails.

### Read-Only
Expand All @@ -110,8 +120,21 @@ Required:

Optional:

- `aliases` (Block List) The aliases that the destination index for the transform should have. (see [below for nested schema](#nestedblock--destination--aliases))
- `pipeline` (String) The unique identifier for an ingest pipeline.

<a id="nestedblock--destination--aliases"></a>
### Nested Schema for `destination.aliases`

Required:

- `alias` (String) The name of the alias.

Optional:

- `move_on_creation` (Boolean) Whether the destination index should be the only index in this alias. Defaults to false.



<a id="nestedblock--source"></a>
### Nested Schema for `source`
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,16 @@ resource "elasticstack_elasticsearch_transform" "transform_with_pivot" {

destination {
index = "destination_index_for_transform"

aliases {
alias = "test_alias_1"
move_on_creation = true
}

aliases {
alias = "test_alias_2"
move_on_creation = false
}
}

pivot = jsonencode({
Expand Down
73 changes: 57 additions & 16 deletions internal/elasticsearch/transform/transform.go
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,7 @@ func init() {

// capabilities
settingsRequiredVersions["destination.pipeline"] = version.Must(version.NewVersion("7.3.0"))
settingsRequiredVersions["destination.aliases"] = version.Must(version.NewVersion("8.8.0"))
settingsRequiredVersions["frequency"] = version.Must(version.NewVersion("7.3.0"))
settingsRequiredVersions["latest"] = version.Must(version.NewVersion("7.11.0"))
settingsRequiredVersions["retention_policy"] = version.Must(version.NewVersion("7.12.0"))
Expand Down Expand Up @@ -116,6 +117,26 @@ func ResourceTransform() *schema.Resource {
validation.StringMatch(regexp.MustCompile(`^[a-z0-9!$%&'()+.;=@[\]^{}~_-]+$`), "must contain lower case alphanumeric characters and selected punctuation, see: https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-create-index.html#indices-create-api-path-params"),
),
},
"aliases": {
Description: "The aliases that the destination index for the transform should have.",
Type: schema.TypeList,
Optional: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"alias": {
Description: "The name of the alias.",
Type: schema.TypeString,
Required: true,
},
"move_on_creation": {
Description: "Whether the destination index should be the only index in this alias. Defaults to false.",
Type: schema.TypeBool,
Optional: true,
Default: false,
},
},
},
},
"pipeline": {
Description: "The unique identifier for an ingest pipeline.",
Type: schema.TypeString,
Expand Down Expand Up @@ -267,14 +288,14 @@ func ResourceTransform() *schema.Resource {
},
"timeout": {
Type: schema.TypeString,
Description: "Period to wait for a response from Elastisearch when performing any management operation. If no response is received before the timeout expires, the operation fails and returns an error. Defaults to `30s`.",
Description: "Period to wait for a response from Elasticsearch when performing any management operation. If no response is received before the timeout expires, the operation fails and returns an error. Defaults to `30s`.",
Optional: true,
Default: "30s",
ValidateFunc: utils.StringIsDuration,
},
"enabled": {
Type: schema.TypeBool,
Description: "Controls wether the transform should be started or stopped. Default is `false` (stopped).",
Description: "Controls whether the transform should be started or stopped. Default is `false` (stopped).",
Optional: true,
Default: false,
},
Expand Down Expand Up @@ -365,7 +386,7 @@ func resourceTransformRead(ctx context.Context, d *schema.ResourceData, meta int
return diags
}

if err := updateResourceDataFromModel(ctx, d, transform); err != nil {
if err := updateResourceDataFromModel(d, transform); err != nil {
return diag.FromErr(err)
}

Expand All @@ -375,7 +396,7 @@ func resourceTransformRead(ctx context.Context, d *schema.ResourceData, meta int
return diags
}

if err := updateResourceDataFromStats(ctx, d, transformStats); err != nil {
if err := updateResourceDataFromStats(d, transformStats); err != nil {
return diag.FromErr(err)
}

Expand Down Expand Up @@ -485,13 +506,23 @@ func getTransformFromResourceData(ctx context.Context, d *schema.ResourceData, n
}

if v, ok := d.GetOk("destination"); ok {

definedDestination := v.([]interface{})[0].(map[string]interface{})

transform.Destination = &models.TransformDestination{
Index: definedDestination["index"].(string),
}

if aliases, ok := definedDestination["aliases"].([]interface{}); ok && len(aliases) > 0 && isSettingAllowed(ctx, "destination.aliases", serverVersion) {
transform.Destination.Aliases = make([]models.TransformAlias, len(aliases))
for i, alias := range aliases {
aliasMap := alias.(map[string]interface{})
transform.Destination.Aliases[i] = models.TransformAlias{
Alias: aliasMap["alias"].(string),
MoveOnCreation: aliasMap["move_on_creation"].(bool),
}
}
}

if pipeline, ok := definedDestination["pipeline"]; ok && isSettingAllowed(ctx, "destination.pipeline", serverVersion) {
transform.Destination.Pipeline = pipeline.(string)
}
Expand Down Expand Up @@ -608,7 +639,7 @@ func getTransformFromResourceData(ctx context.Context, d *schema.ResourceData, n
return &transform, nil
}

func updateResourceDataFromModel(ctx context.Context, d *schema.ResourceData, transform *models.Transform) error {
func updateResourceDataFromModel(d *schema.ResourceData, transform *models.Transform) error {

// transform.Description
if err := d.Set("description", transform.Description); err != nil {
Expand Down Expand Up @@ -724,7 +755,7 @@ func updateResourceDataFromModel(ctx context.Context, d *schema.ResourceData, tr
return nil
}

func updateResourceDataFromStats(ctx context.Context, d *schema.ResourceData, transformStats *models.TransformStats) error {
func updateResourceDataFromStats(d *schema.ResourceData, transformStats *models.TransformStats) error {

// transform.Enabled
if err := d.Set("enabled", transformStats.IsStarted()); err != nil {
Expand Down Expand Up @@ -774,9 +805,19 @@ func flattenDestination(dest *models.TransformDestination) []interface{} {
}

d := make(map[string]interface{})

d["index"] = dest.Index

if len(dest.Aliases) > 0 {
aliases := make([]interface{}, len(dest.Aliases))
for i, alias := range dest.Aliases {
aliasMap := make(map[string]interface{})
aliasMap["alias"] = alias.Alias
aliasMap["move_on_creation"] = alias.MoveOnCreation
aliases[i] = aliasMap
}
d["aliases"] = aliases
}

if dest.Pipeline != "" {
d["pipeline"] = dest.Pipeline
}
Expand All @@ -789,18 +830,18 @@ func flattenSync(sync *models.TransformSync) []interface{} {
return nil
}

time := make(map[string]interface{})
t := make(map[string]interface{})

if sync.Time.Delay != "" {
time["delay"] = sync.Time.Delay
t["delay"] = sync.Time.Delay
}

if sync.Time.Field != "" {
time["field"] = sync.Time.Field
t["field"] = sync.Time.Field
}

s := make(map[string]interface{})
s["time"] = []interface{}{time}
s["time"] = []interface{}{t}

return []interface{}{s}
}
Expand All @@ -810,18 +851,18 @@ func flattenRetentionPolicy(retention *models.TransformRetentionPolicy) []interf
return []interface{}{}
}

time := make(map[string]interface{})
t := make(map[string]interface{})

if retention.Time.MaxAge != "" {
time["max_age"] = retention.Time.MaxAge
t["max_age"] = retention.Time.MaxAge
}

if retention.Time.Field != "" {
time["field"] = retention.Time.Field
t["field"] = retention.Time.Field
}

r := make(map[string]interface{})
r["time"] = []interface{}{time}
r["time"] = []interface{}{t}

return []interface{}{r}
}
Expand Down
Loading
Loading