diff --git a/CHANGELOG.md b/CHANGELOG.md index 6d6920c..ccec360 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,7 +2,7 @@ ENHANCEMENTS: -* Supports `dynamic` block for `tags`, `labels` and `regions_config` +* Supports `dynamic` blocks for `tags`, `labels`, `regions_config` and `replication_specs` ## 1.0.0 (Mar 6, 2025) diff --git a/README.md b/README.md index 69fd104..eb3839c 100644 --- a/README.md +++ b/README.md @@ -30,6 +30,8 @@ atlas plugin list ### Usage +You can find more information in the [Migration Guide: Cluster to Advanced Cluster](https://registry.terraform.io/providers/mongodb/mongodbatlas/latest/docs/guides/cluster-to-advanced-cluster-migration-guide). + **Note**: In order to use the **Preview for MongoDB Atlas Provider 2.0.0** of `mongodbatlas_advanced_cluster`, you need to set the environment variable `MONGODB_ATLAS_PREVIEW_PROVIDER_V2_ADVANCED_CLUSTER` to `true`. If you want to convert a Terraform configuration from `mongodbatlas_cluster` to `mongodbatlas_advanced_cluster`, use the following command: @@ -75,31 +77,51 @@ dynamic "tags" { #### Dynamic blocks in regions_config -You can use `dynamic` blocks for `regions_config`. The plugin assumes that `for_each` has an expression which is evaluated to a `list` or `set` of objects. +You can use `dynamic` blocks for `regions_config`. The plugin assumes that `for_each` has an expression which is evaluated to a `list` or `set` of objects. See this [guide](./docs/guide_clu2adv_dynamic_block.md) to learn more about some limitations. 
This is an example of how to use dynamic blocks in `regions_config`: ```hcl - replication_specs { - num_shards = var.replication_specs.num_shards - zone_name = var.replication_specs.zone_name # only needed if you're using zones +replication_specs { + num_shards = var.replication_specs.num_shards + zone_name = var.replication_specs.zone_name # only needed if you're using zones + dynamic "regions_config" { + for_each = var.replication_specs.regions_config + content { + priority = regions_config.value.priority + region_name = regions_config.value.region_name + electable_nodes = regions_config.value.electable_nodes + read_only_nodes = regions_config.value.read_only_nodes + } + } +} +``` + +#### Dynamic blocks in replication_specs + +You can use `dynamic` blocks for `replication_specs`. The plugin assumes that `for_each` has an expression which is evaluated to a `list` of objects. See this [guide](./docs/guide_clu2adv_dynamic_block.md) to learn more about some limitations. +This is an example of how to use dynamic blocks in `replication_specs`: +```hcl +dynamic "replication_specs" { + for_each = var.replication_specs + content { + num_shards = replication_specs.value.num_shards + zone_name = replication_specs.value.zone_name # only needed if you're using zones dynamic "regions_config" { - for_each = var.replication_specs.regions_config + for_each = replication_specs.value.regions_config content { - priority = regions_config.value.priority - region_name = regions_config.value.region_name electable_nodes = regions_config.value.electable_nodes + priority = regions_config.value.priority read_only_nodes = regions_config.value.read_only_nodes + region_name = regions_config.value.region_name } } } +} ``` -Dynamic block and individual blocks for `regions_config` are not supported at the same time. If you need this use case, please send us [feedback](https://github.com/mongodb-labs/atlas-cli-plugin-terraform/issues). 
There are currently two main approaches to handle this: -- (Recommended) Remove the individual `regions_config` blocks and add their information to the variable you're using in the `for_each` expression, e.g. using [concat](https://developer.hashicorp.com/terraform/language/functions/concat) if you're using a list or [setunion](https://developer.hashicorp.com/terraform/language/functions/setunion) for sets. In this way, you don't need to change the generated `mongodb_advanced_cluster` configuration. -- Change the generated `mongodb_advanced_cluster` configuration to join the individual blocks to the code generated for the `dynamic` block. This approach is more error-prone. ### Limitations - [`num_shards`](https://registry.terraform.io/providers/mongodb/mongodbatlas/latest/docs/resources/cluster#num_shards-2) in `replication_specs` must be a numeric [literal expression](https://developer.hashicorp.com/nomad/docs/job-specification/hcl2/expressions#literal-expressions), e.g. `var.num_shards` is not supported. This is to allow creating a `replication_specs` element per shard in `mongodbatlas_advanced_cluster`. This limitation doesn't apply if you're using `dynamic` blocks in `regions_config` or `replication_specs`. -- `dynamic` blocks are currently supported only for `tags`, `labels` and `regions_config`. See limitations for `regions_config` support in [its section](#dynamic-blocks-in-regions_config) above. **Coming soon**: support for `replication_specs`. +- `dynamic` blocks are supported with some [limitations](./docs/guide_clu2adv_dynamic_block.md). 
## Feedback diff --git a/docs/guide_clu2adv_dynamic_block.md b/docs/guide_clu2adv_dynamic_block.md new file mode 100644 index 0000000..667039c --- /dev/null +++ b/docs/guide_clu2adv_dynamic_block.md @@ -0,0 +1,98 @@ +# Guide to handle dynamic block limitations in regions_config and replication_specs + +The plugin command to convert `mongodbatlas_cluster` resources to `mongodbatlas_advanced_cluster` supports `dynamic` blocks for `regions_config` and `replication_specs`. However, there are some limitations when using `dynamic` blocks in these fields. This guide explains how to handle these limitations. + +If you need to use the plugin for use cases not yet supported, please send us [feedback](https://github.com/mongodb-labs/atlas-cli-plugin-terraform/issues). + +## Dynamic block and individual blocks in the same resource + +Dynamic block and individual blocks for `regions_config` or `replication_specs` are not supported at the same time. The recommended way to handle this is to remove the individual `regions_config` or `replication_specs` blocks and use a local variable to add the individual block information to the variable you're using in the `for_each` expression, using [concat](https://developer.hashicorp.com/terraform/language/functions/concat) if you're using a list or [setunion](https://developer.hashicorp.com/terraform/language/functions/setunion) for sets. + +Let's see an example with `regions_config`; it is the same for `replication_specs`. In the original configuration file, the `mongodbatlas_cluster` resource is used inside a module that receives the `regions_config` elements in a `list` variable and we want to add an additional `regions_config` with a read-only node. 
+```hcl +variable "replication_specs" { + type = object({ + num_shards = number + regions_config = list(object({ + region_name = string + electable_nodes = number + priority = number + read_only_nodes = number + })) + }) +} + +resource "mongodbatlas_cluster" "this" { + project_id = var.project_id + name = var.cluster_name + cluster_type = var.cluster_type + provider_name = var.provider_name + provider_instance_size_name = var.provider_instance_size_name + replication_specs { + num_shards = var.replication_specs.num_shards + dynamic "regions_config" { + for_each = var.replication_specs.regions_config + content { + region_name = regions_config.value.region_name + electable_nodes = regions_config.value.electable_nodes + priority = regions_config.value.priority + read_only_nodes = regions_config.value.read_only_nodes + } + } + regions_config { # individual region + region_name = "US_EAST_1" + read_only_nodes = 1 + } + } +} +``` + +We modify the configuration file to create an intermediate `local` variable to merge the `regions_config` variable elements and the additional `region_config`: +```hcl +variable "replication_specs" { + type = object({ + num_shards = number + regions_config = list(object({ + region_name = string + electable_nodes = number + priority = number + read_only_nodes = number + })) + }) +} + +locals { + regions_config_all = concat( + var.replication_specs.regions_config, + [ + { + region_name = "US_EAST_1" + electable_nodes = 0 + priority = 0 + read_only_nodes = 1 + }, + ] + ) +} + +resource "mongodbatlas_cluster" "this" { + project_id = var.project_id + name = var.cluster_name + cluster_type = var.cluster_type + provider_name = var.provider_name + provider_instance_size_name = var.provider_instance_size_name + replication_specs { + num_shards = var.replication_specs.num_shards + dynamic "regions_config" { + for_each = local.regions_config_all # changed to use the local variable + content { + region_name = regions_config.value.region_name + 
electable_nodes = regions_config.value.electable_nodes + priority = regions_config.value.priority + read_only_nodes = regions_config.value.read_only_nodes + } + } + } +} +``` +This modified configuration file has the same behavior as the original one, but it doesn't have individual blocks anymore, only the `dynamic` block, so it is supported by the plugin. diff --git a/internal/convert/const_names.go b/internal/convert/const_names.go index c2adc20..8696bcc 100644 --- a/internal/convert/const_names.go +++ b/internal/convert/const_names.go @@ -55,4 +55,5 @@ const ( nForEach = "for_each" nContent = "content" nRegion = "region" + nSpec = "spec" ) diff --git a/internal/convert/convert.go b/internal/convert/convert.go index 8482b2c..23b2d32 100644 --- a/internal/convert/convert.go +++ b/internal/convert/convert.go @@ -37,7 +37,7 @@ const ( ) var ( - dynamicBlockAllowList = []string{nTags, nLabels, nConfigSrc} + dynamicBlockAllowList = []string{nTags, nLabels, nConfigSrc, nRepSpecs} ) type attrVals struct { @@ -91,10 +91,10 @@ func convertResource(block *hclwrite.Block) (bool, error) { } var err error - if blockb.FirstMatchingBlock(nRepSpecs, nil) != nil { - err = fillCluster(blockb) - } else { + if isFreeTierCluster(blockb) { err = fillFreeTierCluster(blockb) + } else { + err = fillCluster(blockb) } if err != nil { return false, err @@ -102,6 +102,11 @@ func convertResource(block *hclwrite.Block) (bool, error) { return true, nil } +func isFreeTierCluster(resourceb *hclwrite.Body) bool { + d, _ := getDynamicBlock(resourceb, nRepSpecs) + return resourceb.FirstMatchingBlock(nRepSpecs, nil) == nil && !d.IsPresent() +} + func convertDataSource(block *hclwrite.Block) bool { if block.Type() != dataSourceType { return false @@ -190,6 +195,15 @@ func fillCluster(resourceb *hclwrite.Body) error { } func fillReplicationSpecs(resourceb *hclwrite.Body, root attrVals) error { + d, err := fillReplicationSpecsWithDynamicBlock(resourceb, root) + if err != nil { + return err + } + if 
d.IsPresent() { + resourceb.RemoveBlock(d.block) + resourceb.SetAttributeRaw(nRepSpecs, d.tokens) + return nil + } // at least one replication_specs exists here, if not it would be a free tier cluster var specbs []*hclwrite.Body for { @@ -202,7 +216,7 @@ func fillReplicationSpecs(resourceb *hclwrite.Body, root attrVals) error { break } specbSrc := specSrc.Body() - d, err := fillReplicationSpecsWithDynamicRegionConfigs(specbSrc, root) + d, err := fillReplicationSpecsWithDynamicRegionConfigs(specbSrc, root, false) if err != nil { return err } @@ -312,8 +326,26 @@ func fillBlockOpt(resourceb *hclwrite.Body, name string) { resourceb.SetAttributeRaw(name, hcl.TokensObject(block.Body())) } +// fillReplicationSpecsWithDynamicBlock used for dynamic blocks in replication_specs +func fillReplicationSpecsWithDynamicBlock(resourceb *hclwrite.Body, root attrVals) (dynamicBlock, error) { + dSpec, err := getDynamicBlock(resourceb, nRepSpecs) + if err != nil || !dSpec.IsPresent() { + return dynamicBlock{}, err + } + transformDynamicBlockReferences(dSpec.content.Body(), nRepSpecs, nSpec) + dConfig, err := fillReplicationSpecsWithDynamicRegionConfigs(dSpec.content.Body(), root, true) + if err != nil { + return dynamicBlock{}, err + } + forSpec := hcl.TokensFromExpr(fmt.Sprintf("for %s in %s : ", nSpec, hcl.GetAttrExpr(dSpec.forEach))) + forSpec = append(forSpec, dConfig.tokens...) 
+ tokens := hcl.TokensFuncFlatten(forSpec) + dSpec.tokens = tokens + return dSpec, nil +} + // fillReplicationSpecsWithDynamicRegionConfigs is used for dynamic blocks in region_configs -func fillReplicationSpecsWithDynamicRegionConfigs(specbSrc *hclwrite.Body, root attrVals) (dynamicBlock, error) { +func fillReplicationSpecsWithDynamicRegionConfigs(specbSrc *hclwrite.Body, root attrVals, transformRegionReferences bool) (dynamicBlock, error) { d, err := getDynamicBlock(specbSrc, nConfigSrc) if err != nil || !d.IsPresent() { return dynamicBlock{}, err @@ -323,7 +355,11 @@ func fillReplicationSpecsWithDynamicRegionConfigs(specbSrc *hclwrite.Body, root if zoneName := hcl.GetAttrExpr(specbSrc.GetAttribute(nZoneName)); zoneName != "" { repSpecb.SetAttributeRaw(nZoneName, hcl.TokensFromExpr(zoneName)) } - regionFor, err := getDynamicBlockRegionConfigsRegionArray(d, root) + forEach := hcl.GetAttrExpr(d.forEach) + if transformRegionReferences { + forEach = replaceDynamicBlockReferences(forEach, nRepSpecs, nSpec) + } + regionFor, err := getDynamicBlockRegionConfigsRegionArray(forEach, d.content, root) if err != nil { return dynamicBlock{}, err } @@ -414,7 +450,7 @@ func getSpecs(configSrc *hclwrite.Block, countName string, root attrVals, isDyna } tokens := hcl.TokensObject(fileb) if isDynamicBlock { - tokens = encloseDynamicBlockRegionSpec(tokens, countName) + tokens = append(hcl.TokensFromExpr(fmt.Sprintf("%s == 0 ? null :", hcl.GetAttrExpr(count))), tokens...) } return tokens, nil } @@ -520,41 +556,38 @@ func replaceDynamicBlockExpr(attr *hclwrite.Attribute, blockName, attrName strin return strings.ReplaceAll(expr, fmt.Sprintf("%s.%s", blockName, attrName), attrName) } -func encloseDynamicBlockRegionSpec(specTokens hclwrite.Tokens, countName string) hclwrite.Tokens { - tokens := hcl.TokensFromExpr(fmt.Sprintf("%s.%s > 0 ?", nRegion, countName)) - tokens = append(tokens, specTokens...) - return append(tokens, hcl.TokensFromExpr(": null")...) 
-} - // getDynamicBlockRegionConfigsRegionArray returns the region array for a dynamic block in replication_specs. // e.g. [ for region in var.replication_specs.regions_config : { ... } if priority == region.priority ] -func getDynamicBlockRegionConfigsRegionArray(d dynamicBlock, root attrVals) (hclwrite.Tokens, error) { - transformDynamicBlockReferences(d.content.Body()) - priorityStr := hcl.GetAttrExpr(d.content.Body().GetAttribute(nPriority)) +func getDynamicBlockRegionConfigsRegionArray(forEach string, configSrc *hclwrite.Block, root attrVals) (hclwrite.Tokens, error) { + transformDynamicBlockReferences(configSrc.Body(), nConfigSrc, nRegion) + priorityStr := hcl.GetAttrExpr(configSrc.Body().GetAttribute(nPriority)) if priorityStr == "" { return nil, fmt.Errorf("%s: %s not found", errRepSpecs, nPriority) } - region, err := getRegionConfig(d.content, root, true) + region, err := getRegionConfig(configSrc, root, true) if err != nil { return nil, err } - tokens := hcl.TokensFromExpr(fmt.Sprintf("for %s in %s :", nRegion, hcl.GetAttrExpr(d.forEach))) + tokens := hcl.TokensFromExpr(fmt.Sprintf("for %s in %s :", nRegion, forEach)) tokens = append(tokens, hcl.EncloseBraces(region.BuildTokens(nil), true)...) tokens = append(tokens, hcl.TokensFromExpr(fmt.Sprintf("if %s == %s", nPriority, priorityStr))...) return hcl.EncloseBracketsNewLines(tokens), nil } -// transformDynamicBlockReferences changes value references in all attributes, e.g. 
regions_config.value.electable_nodes to region.electable_nodes -func transformDynamicBlockReferences(configSrcb *hclwrite.Body) { +func transformDynamicBlockReferences(configSrcb *hclwrite.Body, blockName, varName string) { for name, attr := range configSrcb.Attributes() { - expr := hcl.GetAttrExpr(attr) - expr = strings.ReplaceAll(expr, - fmt.Sprintf("%s.%s.", nConfigSrc, nValue), - fmt.Sprintf("%s.", nRegion)) + expr := replaceDynamicBlockReferences(hcl.GetAttrExpr(attr), blockName, varName) configSrcb.SetAttributeRaw(name, hcl.TokensFromExpr(expr)) } } +// replaceDynamicBlockReferences changes value references, e.g. regions_config.value.electable_nodes to region.electable_nodes +func replaceDynamicBlockReferences(expr, blockName, varName string) string { + return strings.ReplaceAll(expr, + fmt.Sprintf("%s.%s.", blockName, nValue), + fmt.Sprintf("%s.", varName)) +} + func sortConfigsByPriority(configs []*hclwrite.Body) []*hclwrite.Body { for _, config := range configs { if _, err := hcl.GetAttrInt(config.GetAttribute(nPriority), errPriority); err != nil { diff --git a/internal/convert/testdata/clu2adv/dynamic_regions_config_auto_scaling.out.tf b/internal/convert/testdata/clu2adv/dynamic_regions_config_auto_scaling.out.tf index 62bbd5f..2aba85d 100644 --- a/internal/convert/testdata/clu2adv/dynamic_regions_config_auto_scaling.out.tf +++ b/internal/convert/testdata/clu2adv/dynamic_regions_config_auto_scaling.out.tf @@ -23,27 +23,27 @@ resource "mongodbatlas_advanced_cluster" "cluster" { provider_name = var.provider_name region_name = region.region_name priority = region.priority - electable_specs = region.electable_nodes > 0 ? { + electable_specs = region.electable_nodes == 0 ? null : { node_count = region.electable_nodes instance_size = var.provider_instance_size_name disk_size_gb = var.disk_size_gb ebs_volume_type = var.provider_volume_type disk_iops = var.provider_disk_iops - } : null - read_only_specs = region.read_only_nodes > 0 ? 
{ + } + read_only_specs = region.read_only_nodes == 0 ? null : { node_count = region.read_only_nodes instance_size = var.provider_instance_size_name disk_size_gb = var.disk_size_gb ebs_volume_type = var.provider_volume_type disk_iops = var.provider_disk_iops - } : null - analytics_specs = region.analytics_nodes > 0 ? { + } + analytics_specs = region.analytics_nodes == 0 ? null : { node_count = region.analytics_nodes instance_size = var.provider_instance_size_name disk_size_gb = var.disk_size_gb ebs_volume_type = var.provider_volume_type disk_iops = var.provider_disk_iops - } : null + } auto_scaling = { disk_gb_enabled = var.auto_scaling_disk_gb_enabled } diff --git a/internal/convert/testdata/clu2adv/dynamic_regions_config_basic.out.tf b/internal/convert/testdata/clu2adv/dynamic_regions_config_basic.out.tf index a2f1050..334f3f8 100644 --- a/internal/convert/testdata/clu2adv/dynamic_regions_config_basic.out.tf +++ b/internal/convert/testdata/clu2adv/dynamic_regions_config_basic.out.tf @@ -12,14 +12,14 @@ resource "mongodbatlas_advanced_cluster" "dynamic_regions_config" { provider_name = "AWS" region_name = region.region_name priority = region.prio - electable_specs = region.electable_nodes > 0 ? { + electable_specs = region.electable_nodes == 0 ? null : { node_count = region.electable_nodes instance_size = "M10" - } : null - read_only_specs = region.read_only_nodes > 0 ? { + } + read_only_specs = region.read_only_nodes == 0 ? 
null : { node_count = region.read_only_nodes instance_size = "M10" - } : null + } } if priority == region.prio ] ]) diff --git a/internal/convert/testdata/clu2adv/dynamic_replication_specs_basic.in.tf b/internal/convert/testdata/clu2adv/dynamic_replication_specs_basic.in.tf new file mode 100644 index 0000000..b26d4aa --- /dev/null +++ b/internal/convert/testdata/clu2adv/dynamic_replication_specs_basic.in.tf @@ -0,0 +1,87 @@ +# Based on https://github.com/mongodb/terraform-provider-mongodbatlas/blob/master/examples/migrate_cluster_to_advanced_cluster/module_maintainer/v1/main.tf +resource "mongodbatlas_cluster" "this" { + lifecycle { + precondition { + condition = !(var.auto_scaling_disk_gb_enabled && var.disk_size > 0) + error_message = "Must use either auto_scaling_disk_gb_enabled or disk_size, not both." + } + } + + project_id = var.project_id + name = var.cluster_name + auto_scaling_disk_gb_enabled = var.auto_scaling_disk_gb_enabled + cluster_type = var.cluster_type + disk_size_gb = var.disk_size + mongo_db_major_version = var.mongo_db_major_version + provider_instance_size_name = var.instance_size + provider_name = var.provider_name + + dynamic "tags" { + for_each = var.tags + content { + key = tags.key + value = tags.value + } + } + + dynamic "replication_specs" { + for_each = var.replication_specs + content { + num_shards = replication_specs.value.num_shards + zone_name = replication_specs.value.zone_name + + dynamic "regions_config" { + for_each = replication_specs.value.regions_config + content { + electable_nodes = regions_config.value.electable_nodes + priority = regions_config.value.priority + read_only_nodes = regions_config.value.read_only_nodes + region_name = regions_config.value.region_name + } + } + } + } +} + +# example of variable for demonstration purposes, not used in the conversion +variable "replication_specs" { + description = "List of replication specifications in mongodbatlas_cluster format" + type = list(object({ + num_shards = number + 
zone_name = string + regions_config = list(object({ + region_name = string + electable_nodes = number + priority = number + read_only_nodes = optional(number, 0) + })) + })) + default = [ + { + num_shards = 1 + zone_name = "Zone 1" + regions_config = [ + { + region_name = "US_EAST_1" + electable_nodes = 3 + priority = 7 + } + ] + }, { + num_shards = 2 + zone_name = "Zone 2" + regions_config = [ + { + region_name = "US_WEST_2" + electable_nodes = 2 + priority = 6 + read_only_nodes = 1 + }, { + region_name = "EU_WEST_1" + electable_nodes = 3 + priority = 7 + } + ] + } + ] +} diff --git a/internal/convert/testdata/clu2adv/dynamic_replication_specs_basic.out.tf b/internal/convert/testdata/clu2adv/dynamic_replication_specs_basic.out.tf new file mode 100644 index 0000000..60549a3 --- /dev/null +++ b/internal/convert/testdata/clu2adv/dynamic_replication_specs_basic.out.tf @@ -0,0 +1,93 @@ +# Based on https://github.com/mongodb/terraform-provider-mongodbatlas/blob/master/examples/migrate_cluster_to_advanced_cluster/module_maintainer/v1/main.tf +resource "mongodbatlas_advanced_cluster" "this" { + lifecycle { + precondition { + condition = !(var.auto_scaling_disk_gb_enabled && var.disk_size > 0) + error_message = "Must use either auto_scaling_disk_gb_enabled or disk_size, not both." + } + } + + project_id = var.project_id + name = var.cluster_name + cluster_type = var.cluster_type + mongo_db_major_version = var.mongo_db_major_version + + + replication_specs = flatten([ + for spec in var.replication_specs : [ + for i in range(spec.num_shards) : { + zone_name = spec.zone_name + region_configs = flatten([ + # Regions must be sorted by priority in descending order. + for priority in range(7, 0, -1) : [ + for region in spec.regions_config : { + provider_name = var.provider_name + region_name = region.region_name + priority = region.priority + electable_specs = region.electable_nodes == 0 ? 
null : { + node_count = region.electable_nodes + instance_size = var.instance_size + disk_size_gb = var.disk_size + } + read_only_specs = region.read_only_nodes == 0 ? null : { + node_count = region.read_only_nodes + instance_size = var.instance_size + disk_size_gb = var.disk_size + } + auto_scaling = { + disk_gb_enabled = var.auto_scaling_disk_gb_enabled + } + } if priority == region.priority + ] + ]) + } + ] + ]) + tags = var.tags + + # Generated by atlas-cli-plugin-terraform. + # Please review the changes and confirm that references to this resource are updated. +} + +# example of variable for demonstration purposes, not used in the conversion +variable "replication_specs" { + description = "List of replication specifications in mongodbatlas_cluster format" + type = list(object({ + num_shards = number + zone_name = string + regions_config = list(object({ + region_name = string + electable_nodes = number + priority = number + read_only_nodes = optional(number, 0) + })) + })) + default = [ + { + num_shards = 1 + zone_name = "Zone 1" + regions_config = [ + { + region_name = "US_EAST_1" + electable_nodes = 3 + priority = 7 + } + ] + }, { + num_shards = 2 + zone_name = "Zone 2" + regions_config = [ + { + region_name = "US_WEST_2" + electable_nodes = 2 + priority = 6 + read_only_nodes = 1 + }, { + region_name = "EU_WEST_1" + electable_nodes = 3 + priority = 7 + } + ] + } + ] +} diff --git a/internal/convert/testdata/clu2adv/dynamic_replication_specs_different_var_name.in.tf b/internal/convert/testdata/clu2adv/dynamic_replication_specs_different_var_name.in.tf new file mode 100644 index 0000000..ead726f --- /dev/null +++ b/internal/convert/testdata/clu2adv/dynamic_replication_specs_different_var_name.in.tf @@ -0,0 +1,45 @@ +resource "mongodbatlas_cluster" "different_var_names" { + project_id = var.project_id + name = var.cluster_name + cluster_type = var.cluster_type + provider_instance_size_name = var.instance_size + provider_name = var.provider_name + dynamic 
"replication_specs" { + for_each = var.my_rep_specs + content { + num_shards = replication_specs.value.my_shards + zone_name = replication_specs.value.my_zone + + dynamic "regions_config" { + for_each = replication_specs.value.my_regions + content { + electable_nodes = regions_config.value.my_electable_nodes + priority = regions_config.value.prio + region_name = regions_config.value.my_region_name + } + } + } + } +} + +resource "mongodbatlas_cluster" "different_var_names_no_zone_name" { + project_id = var.project_id + name = var.cluster_name + cluster_type = var.cluster_type + provider_instance_size_name = var.instance_size + provider_name = var.provider_name + dynamic "replication_specs" { + for_each = var.my_rep_specs + content { + num_shards = replication_specs.value.my_shards + dynamic "regions_config" { + for_each = replication_specs.value.my_regions + content { + electable_nodes = regions_config.value.my_electable_nodes + priority = regions_config.value.prio + region_name = regions_config.value.my_region_name + } + } + } + } +} diff --git a/internal/convert/testdata/clu2adv/dynamic_replication_specs_different_var_name.out.tf b/internal/convert/testdata/clu2adv/dynamic_replication_specs_different_var_name.out.tf new file mode 100644 index 0000000..f680357 --- /dev/null +++ b/internal/convert/testdata/clu2adv/dynamic_replication_specs_different_var_name.out.tf @@ -0,0 +1,58 @@ +resource "mongodbatlas_advanced_cluster" "different_var_names" { + project_id = var.project_id + name = var.cluster_name + cluster_type = var.cluster_type + replication_specs = flatten([ + for spec in var.my_rep_specs : [ + for i in range(spec.my_shards) : { + zone_name = spec.my_zone + region_configs = flatten([ + # Regions must be sorted by priority in descending order. 
+ for priority in range(7, 0, -1) : [ + for region in spec.my_regions : { + provider_name = var.provider_name + region_name = region.my_region_name + priority = region.prio + electable_specs = region.my_electable_nodes == 0 ? null : { + node_count = region.my_electable_nodes + instance_size = var.instance_size + } + } if priority == region.prio + ] + ]) + } + ] + ]) + + # Generated by atlas-cli-plugin-terraform. + # Please review the changes and confirm that references to this resource are updated. +} + +resource "mongodbatlas_advanced_cluster" "different_var_names_no_zone_name" { + project_id = var.project_id + name = var.cluster_name + cluster_type = var.cluster_type + replication_specs = flatten([ + for spec in var.my_rep_specs : [ + for i in range(spec.my_shards) : { + region_configs = flatten([ + # Regions must be sorted by priority in descending order. + for priority in range(7, 0, -1) : [ + for region in spec.my_regions : { + provider_name = var.provider_name + region_name = region.my_region_name + priority = region.prio + electable_specs = region.my_electable_nodes == 0 ? null : { + node_count = region.my_electable_nodes + instance_size = var.instance_size + } + } if priority == region.prio + ] + ]) + } + ] + ]) + + # Generated by atlas-cli-plugin-terraform. + # Please review the changes and confirm that references to this resource are updated. 
+} diff --git a/internal/convert/testdata/clu2adv/errors.json b/internal/convert/testdata/clu2adv/errors.json index fc81442..67fa980 100644 --- a/internal/convert/testdata/clu2adv/errors.json +++ b/internal/convert/testdata/clu2adv/errors.json @@ -1,10 +1,9 @@ { + "autoscaling_missing_attribute": "setting replication_specs: attribute provider_instance_size_name not found", "configuration_file_error": "failed to parse Terraform config file", "free_cluster_missing_attribute": "free cluster (because no replication_specs): attribute backing_provider_name not found", - "autoscaling_missing_attribute": "setting replication_specs: attribute provider_instance_size_name not found", - "replication_specs_missing_regions_config": "setting replication_specs: regions_config not found", "regions_config_missing_priority": "setting replication_specs: attribute priority not found", - "replication_specs_unsupported_dynamic": "dynamic blocks are not supported", - "replication_specs_non_literal_num_shards": "setting num_shards: failed to evaluate number", - "replication_specs_missing_num_shards": "num_shards not found" + "replication_specs_missing_num_shards": "num_shards not found", + "replication_specs_missing_regions_config": "setting replication_specs: regions_config not found", + "replication_specs_non_literal_num_shards": "setting num_shards: failed to evaluate number" } diff --git a/internal/convert/testdata/clu2adv/replication_specs_unsupported_dynamic.in.tf b/internal/convert/testdata/clu2adv/replication_specs_unsupported_dynamic.in.tf deleted file mode 100644 index c2680bc..0000000 --- a/internal/convert/testdata/clu2adv/replication_specs_unsupported_dynamic.in.tf +++ /dev/null @@ -1,29 +0,0 @@ - -resource "mongodbatlas_cluster" "geo" { - project_id = var.project_id - name = "geo" - cluster_type = "GEOSHARDED" - num_shards = 1 - provider_name = "AWS" - provider_instance_size_name = "M30" - - dynamic "replication_specs" { - for_each = { - "Zone 1" = { - region_name = 
"US_EAST_1" - }, - "Zone 2" = { - region_name = "US_WEST_2" - } - } - content { - zone_name = replication_specs.key - num_shards = 2 - regions_config { - region_name = replication_specs.value.region_name - electable_nodes = 3 - priority = 7 - } - } - } -}