From 41e70252a1899dcbe88c9b96077290288d08540f Mon Sep 17 00:00:00 2001
From: Leo Antoli <430982+lantoli@users.noreply.github.com>
Date: Wed, 10 Sep 2025 12:39:53 +0200
Subject: [PATCH 01/15] test files

---
 ...tion_specs_no_dynamic_region_configs.in.tf | 82 ++++++++++++++++++
 ...ion_specs_no_dynamic_region_configs.out.tf | 84 +++++++++++++++++++
 2 files changed, 166 insertions(+)
 create mode 100644 internal/convert/testdata/adv2v2/dynamic_replication_specs_no_dynamic_region_configs.in.tf
 create mode 100644 internal/convert/testdata/adv2v2/dynamic_replication_specs_no_dynamic_region_configs.out.tf

diff --git a/internal/convert/testdata/adv2v2/dynamic_replication_specs_no_dynamic_region_configs.in.tf b/internal/convert/testdata/adv2v2/dynamic_replication_specs_no_dynamic_region_configs.in.tf
new file mode 100644
index 0000000..947056a
--- /dev/null
+++ b/internal/convert/testdata/adv2v2/dynamic_replication_specs_no_dynamic_region_configs.in.tf
@@ -0,0 +1,82 @@
+locals {
+  replication_specs_list = [
+    {
+      zone_name   = "zone1"
+      region_name = "US_EAST_1"
+    },
+    {
+      zone_name   = "zone2"
+      region_name = "US_WEST_2"
+    }
+  ]
+}
+
+resource "mongodbatlas_advanced_cluster" "one_config" {
+  project_id   = "123"
+  name         = "cluster"
+  cluster_type = "SHARDED"
+
+  dynamic "replication_specs" {
+    for_each = local.replication_specs_list
+    content {
+      num_shards = 2
+      zone_name  = replication_specs.value.zone_name
+
+      region_configs {
+        provider_name = "AWS"
+        region_name   = replication_specs.value.region_name
+        priority      = 7
+
+        electable_specs {
+          instance_size = "M10"
+          node_count    = 3
+        }
+        auto_scaling {
+          disk_gb_enabled = true
+        }
+      }
+    }
+  }
+}
+
+resource "mongodbatlas_advanced_cluster" "multiple_config" {
+  project_id   = "123"
+  name         = "cluster"
+  cluster_type = "SHARDED"
+
+  dynamic "replication_specs" {
+    for_each = local.replication_specs_list
+    content {
+      num_shards = 2
+      zone_name  = replication_specs.value.zone_name
+
+      region_configs {
+        provider_name = "AWS"
+        region_name   = replication_specs.value.region_name
+        priority      = 7
+
+        electable_specs {
+          instance_size = "M10"
+          node_count    = 2
+        }
+        auto_scaling {
+          disk_gb_enabled = true
+        }
+      }
+
+      region_configs {
+        provider_name = "AWS"
+        region_name   = replication_specs.value.region_name
+        priority      = 6
+
+        electable_specs {
+          instance_size = "M10"
+          node_count    = 1
+        }
+        auto_scaling {
+          disk_gb_enabled = true
+        }
+      }
+    }
+  }
+}
diff --git a/internal/convert/testdata/adv2v2/dynamic_replication_specs_no_dynamic_region_configs.out.tf b/internal/convert/testdata/adv2v2/dynamic_replication_specs_no_dynamic_region_configs.out.tf
new file mode 100644
index 0000000..d02f709
--- /dev/null
+++ b/internal/convert/testdata/adv2v2/dynamic_replication_specs_no_dynamic_region_configs.out.tf
@@ -0,0 +1,84 @@
+locals {
+  replication_specs_list = [
+    {
+      zone_name   = "zone1"
+      region_name = "US_EAST_1"
+    },
+    {
+      zone_name   = "zone2"
+      region_name = "US_WEST_2"
+    }
+  ]
+}
+
+resource "mongodbatlas_advanced_cluster" "one_config" {
+  project_id   = "123"
+  name         = "cluster"
+  cluster_type = "SHARDED"
+
+  replication_specs = flatten([
+    for spec in local.replication_specs_list : [
+      for i in range(2) : {
+        zone_name = spec.zone_name
+        region_configs = [
+          {
+            priority      = 7
+            provider_name = "AWS"
+            region_name   = spec.region_name
+            electable_specs = {
+              instance_size = "M10"
+              node_count    = 3
+            }
+            auto_scaling = {
+              disk_gb_enabled = true
+            }
+          }
+        ]
+      }
+    ]
+  ])
+
+  # Updated by atlas-cli-plugin-terraform, please review the changes.
+}
+
+resource "mongodbatlas_advanced_cluster" "multiple_config" {
+  project_id   = "123"
+  name         = "cluster"
+  cluster_type = "SHARDED"
+
+  replication_specs = flatten([
+    for spec in local.replication_specs_list : [
+      for i in range(2) : {
+        zone_name = spec.zone_name
+        region_configs = [
+          {
+            priority      = 7
+            provider_name = "AWS"
+            region_name   = spec.region_name
+            electable_specs = {
+              instance_size = "M10"
+              node_count    = 2
+            }
+            auto_scaling = {
+              disk_gb_enabled = true
+            }
+          },
+          {
+            priority      = 6
+            provider_name = "AWS"
+            region_name   = spec.region_name
+            electable_specs = {
+              instance_size = "M10"
+              node_count    = 1
+            }
+            auto_scaling = {
+              disk_gb_enabled = true
+            }
+          }
+        ]
+      }
+    ]
+  ])
+
+  # Updated by atlas-cli-plugin-terraform, please review the changes.
+}

From ad0a164f9b46a40b68cf4baa40e7f9c1a06eda3a Mon Sep 17 00:00:00 2001
From: Leo Antoli <430982+lantoli@users.noreply.github.com>
Date: Wed, 10 Sep 2025 14:06:52 +0200
Subject: [PATCH 02/15] update doc

---
 docs/command_adv2v2.md  | 2 ++
 docs/command_clu2adv.md | 2 ++
 2 files changed, 4 insertions(+)

diff --git a/docs/command_adv2v2.md b/docs/command_adv2v2.md
index d24f919..44f89d6 100644
--- a/docs/command_adv2v2.md
+++ b/docs/command_adv2v2.md
@@ -56,6 +56,7 @@ dynamic "tags" {
 ### Dynamic blocks in region_configs
 
 You can use `dynamic` blocks for `region_configs`. The plugin assumes that the value of `for_each` is an expression which evaluates to a `list` of objects.
+**Note:** `map` and `set` are not supported.
 
 This is an example of how to use dynamic blocks in `region_configs`:
 ```hcl
@@ -81,6 +82,7 @@ replication_specs {
 ### Dynamic blocks in replication_specs
 
 You can use `dynamic` blocks for `replication_specs`. The plugin assumes that the value of `for_each` is an expression which evaluates to a `list` of objects.
+**Note:** `map` and `set` are not supported.
 
 This is an example of how to use dynamic blocks in `replication_specs`:
 ```hcl
diff --git a/docs/command_clu2adv.md b/docs/command_clu2adv.md
index 88986b5..ce4fc41 100644
--- a/docs/command_clu2adv.md
+++ b/docs/command_clu2adv.md
@@ -56,6 +56,7 @@ dynamic "tags" {
 ### Dynamic blocks in regions_config
 
 You can use `dynamic` blocks for `regions_config`. The plugin assumes that the value of `for_each` is an expression which evaluates to a `list` of objects.
+**Note:** `map` and `set` are not supported.
 
 This is an example of how to use dynamic blocks in `regions_config`:
 ```hcl
@@ -77,6 +78,7 @@ replication_specs {
 ### Dynamic blocks in replication_specs
 
 You can use `dynamic` blocks for `replication_specs`. The plugin assumes that the value of `for_each` is an expression which evaluates to a `list` of objects.
+**Note:** `map` and `set` are not supported.
 
 This is an example of how to use dynamic blocks in `replication_specs`:
 ```hcl

From a4e0c707d11ea28f0a9797bb56bbf0d2ea80df77 Mon Sep 17 00:00:00 2001
From: Leo Antoli <430982+lantoli@users.noreply.github.com>
Date: Wed, 10 Sep 2025 14:23:28 +0200
Subject: [PATCH 03/15] implementation

---
 internal/convert/adv2v2.go | 90 +++++++++++++++++++++++++++++++++++++-
 1 file changed, 88 insertions(+), 2 deletions(-)

diff --git a/internal/convert/adv2v2.go b/internal/convert/adv2v2.go
index 734da45..6b01e07 100644
--- a/internal/convert/adv2v2.go
+++ b/internal/convert/adv2v2.go
@@ -129,8 +129,94 @@ func convertRepSpecsWithDynamicBlock(resourceb *hclwrite.Body, diskSizeGB hclwri
 	if err != nil {
 		return dynamicBlock{}, err
 	}
-	forSpec := hcl.TokensFromExpr(buildForExpr(nSpec, hcl.GetAttrExpr(dSpec.forEach), true))
-	dSpec.tokens = hcl.TokensFuncFlatten(append(forSpec, dConfig.tokens...))
+
+	// Check if we have a dynamic region_configs block that was successfully processed
+	if dConfig.tokens != nil {
+		forSpec := hcl.TokensFromExpr(buildForExpr(nSpec, hcl.GetAttrExpr(dSpec.forEach), true))
+		dSpec.tokens = hcl.TokensFuncFlatten(append(forSpec, dConfig.tokens...))
+		return dSpec, nil
+	}
+
+	// Handle static region_configs blocks inside dynamic replication_specs
+	specBody := dSpec.content.Body()
+
+	// Collect static region_configs blocks
+	staticConfigs := collectBlocks(specBody, nConfig)
+	if len(staticConfigs) == 0 {
+		// No static blocks found, this might be an error case
+		// Check if there's also no dynamic block (which would have been handled above)
+		hasDynamicBlock := false
+		for _, block := range specBody.Blocks() {
+			if block.Type() == nDynamic && getResourceName(block) == nConfig {
+				hasDynamicBlock = true
+				break
+			}
+		}
+		if !hasDynamicBlock {
+			return dynamicBlock{}, fmt.Errorf("replication_specs must have at least one region_configs")
+		}
+		// There's a dynamic block but convertConfigsWithDynamicBlock returned empty
+		// This shouldn't happen, but return the error from that function
+		return dynamicBlock{}, nil
+	}
+
+	repSpecb := hclwrite.NewEmptyFile().Body()
+
+	// Handle zone_name attribute
+	if zoneNameAttr := specBody.GetAttribute(nZoneName); zoneNameAttr != nil {
+		zoneNameExpr := transformReference(hcl.GetAttrExpr(zoneNameAttr), nRepSpecs, nSpec)
+		repSpecb.SetAttributeRaw(nZoneName, hcl.TokensFromExpr(zoneNameExpr))
+	}
+
+	// Process static region_configs blocks
+	var configs []*hclwrite.Body
+	for _, configBlock := range staticConfigs {
+		configBlockb := configBlock.Body()
+		// Create a new body with sorted attributes
+		newConfigBody := hclwrite.NewEmptyFile().Body()
+
+		// Copy attributes in the expected order
+		attrs := configBlockb.Attributes()
+		// Priority, provider_name, region_name should come first
+		if priority := attrs["priority"]; priority != nil {
+			newConfigBody.SetAttributeRaw("priority", priority.Expr().BuildTokens(nil))
+		}
+		if provider := attrs["provider_name"]; provider != nil {
+			newConfigBody.SetAttributeRaw("provider_name", provider.Expr().BuildTokens(nil))
+		}
+		if region := attrs["region_name"]; region != nil {
+			newConfigBody.SetAttributeRaw("region_name", region.Expr().BuildTokens(nil))
+		}
+
+		// Process spec blocks and convert them to attributes
+		for _, block := range configBlockb.Blocks() {
+			blockType := block.Type()
+			blockBody := hclwrite.NewEmptyFile().Body()
+			copyAttributesSorted(blockBody, block.Body().Attributes())
+			if diskSizeGB != nil &&
+				(blockType == nElectableSpecs || blockType == nReadOnlySpecs || blockType == nAnalyticsSpecs) {
+				blockBody.SetAttributeRaw(nDiskSizeGB, diskSizeGB)
+			}
+			newConfigBody.SetAttributeRaw(blockType, hcl.TokensObject(blockBody))
+		}
+
+		configs = append(configs, newConfigBody)
+	}
+
+	repSpecb.SetAttributeRaw(nConfig, hcl.TokensArray(configs))
+
+	// Handle num_shards attribute
+	if numShardsAttr := specBody.GetAttribute(nNumShards); numShardsAttr != nil {
+		numShardsExpr := transformReference(hcl.GetAttrExpr(numShardsAttr), nRepSpecs, nSpec)
+		forSpec := hcl.TokensFromExpr(buildForExpr(nSpec, hcl.GetAttrExpr(dSpec.forEach), true))
+		innerFor := hcl.TokensFromExpr(buildForExpr("i", fmt.Sprintf("range(%s)", numShardsExpr), false))
+		innerFor = append(innerFor, hcl.TokensObject(repSpecb)...)
+		dSpec.tokens = hcl.TokensFuncFlatten(append(forSpec, hcl.EncloseBracketsNewLines(innerFor)...))
+	} else {
+		forSpec := hcl.TokensFromExpr(buildForExpr(nSpec, hcl.GetAttrExpr(dSpec.forEach), true))
+		dSpec.tokens = hcl.TokensFuncFlatten(append(forSpec, hcl.TokensArraySingle(repSpecb)...))
+	}
+
 	return dSpec, nil
 }

From 9a5442077b12d7821ea311a9ace4cf16172bd7 Mon Sep 17 00:00:00 2001
From: Leo Antoli <430982+lantoli@users.noreply.github.com>
Date: Wed, 10 Sep 2025 15:20:20 +0200
Subject: [PATCH 04/15] test files for clu2adv

---
 ...tion_specs_no_dynamic_region_configs.in.tf | 60 +++++++++++++
 ...ion_specs_no_dynamic_region_configs.out.tf | 90 +++++++++++++++++++
 2 files changed, 150 insertions(+)
 create mode 100644 internal/convert/testdata/clu2adv/dynamic_replication_specs_no_dynamic_region_configs.in.tf
 create mode 100644 internal/convert/testdata/clu2adv/dynamic_replication_specs_no_dynamic_region_configs.out.tf

diff --git a/internal/convert/testdata/clu2adv/dynamic_replication_specs_no_dynamic_region_configs.in.tf b/internal/convert/testdata/clu2adv/dynamic_replication_specs_no_dynamic_region_configs.in.tf
new file mode 100644
index 0000000..11e7c6b
--- /dev/null
+++ b/internal/convert/testdata/clu2adv/dynamic_replication_specs_no_dynamic_region_configs.in.tf
@@ -0,0 +1,60 @@
+locals {
+  replication_specs_list = [
+    {
+      zone_name   = "zone1"
+      region_name = "US_EAST_1"
+    },
+    {
+      zone_name   = "zone2"
+      region_name = "US_WEST_2"
+    }
+  ]
+}
+
+resource "mongodbatlas_cluster" "one_config" {
+  project_id                   = "123"
+  name                         = "cluster"
+  provider_name                = "AWS"
+  provider_instance_size_name  = "M10"
+  disk_size_gb                 = 10
+  auto_scaling_disk_gb_enabled = true
+  dynamic "replication_specs" {
+    for_each = local.replication_specs_list
+    content {
+      num_shards = 2
+      zone_name  = replication_specs.value.zone_name
+      regions_config {
+        region_name     = replication_specs.value.region_name
+        priority        = 7
+        electable_nodes = 3
+      }
+    }
+  }
+}
+
+resource "mongodbatlas_cluster" "multiple_config" {
+  project_id                   = "123"
+  name                         = "cluster"
+  provider_name                = "AWS"
+  provider_instance_size_name  = "M10"
+  disk_size_gb                 = 10
+  auto_scaling_disk_gb_enabled = true
+  dynamic "replication_specs" {
+    for_each = local.replication_specs_list
+    content {
+      num_shards = 2
+      zone_name  = replication_specs.value.zone_name
+      regions_config {
+        region_name     = replication_specs.value.region_name
+        priority        = 7
+        electable_nodes = 2
+      }
+      regions_config {
+        region_name     = replication_specs.value.region_name
+        priority        = 6
+        electable_nodes = 1
+        read_only_nodes = 4
+      }
+    }
+  }
+}
diff --git a/internal/convert/testdata/clu2adv/dynamic_replication_specs_no_dynamic_region_configs.out.tf b/internal/convert/testdata/clu2adv/dynamic_replication_specs_no_dynamic_region_configs.out.tf
new file mode 100644
index 0000000..32202cf
--- /dev/null
+++ b/internal/convert/testdata/clu2adv/dynamic_replication_specs_no_dynamic_region_configs.out.tf
@@ -0,0 +1,90 @@
+locals {
+  replication_specs_list = [
+    {
+      zone_name   = "zone1"
+      region_name = "US_EAST_1"
+    },
+    {
+      zone_name   = "zone2"
+      region_name = "US_WEST_2"
+    }
+  ]
+}
+
+resource "mongodbatlas_advanced_cluster" "one_config" {
+  project_id = "123"
+  name       = "cluster"
+  replication_specs = flatten([
+    for spec in local.replication_specs_list : [
+      for i in range(2) : {
+        zone_name = spec.zone_name
+        region_configs = [
+          {
+            provider_name = "AWS"
+            region_name   = spec.region_name
+            priority      = 7
+            electable_specs = {
+              node_count    = 3
+              instance_size = "M10"
+              disk_size_gb  = 10
+            }
+            auto_scaling = {
+              disk_gb_enabled = true
+            }
+          }
+        ]
+      }
+    ]
+  ])
+
+  # Generated by atlas-cli-plugin-terraform.
+  # Please review the changes and confirm that references to this resource are updated.
+}
+
+resource "mongodbatlas_advanced_cluster" "multiple_config" {
+  project_id = "123"
+  name       = "cluster"
+  replication_specs = flatten([
+    for spec in local.replication_specs_list : [
+      for i in range(2) : {
+        zone_name = spec.zone_name
+        region_configs = [
+          {
+            provider_name = "AWS"
+            region_name   = spec.region_name
+            priority      = 7
+            electable_specs = {
+              node_count    = 2
+              instance_size = "M10"
+              disk_size_gb  = 10
+            }
+            auto_scaling = {
+              disk_gb_enabled = true
+            }
+          },
+          {
+            provider_name = "AWS"
+            region_name   = spec.region_name
+            priority      = 6
+            electable_specs = {
+              node_count    = 1
+              instance_size = "M10"
+              disk_size_gb  = 10
+            }
+            read_only_specs = {
+              node_count    = 4
+              instance_size = "M10"
+              disk_size_gb  = 10
+            }
+            auto_scaling = {
+              disk_gb_enabled = true
+            }
+          }
+        ]
+      }
+    ]
+  ])
+
+  # Generated by atlas-cli-plugin-terraform.
+  # Please review the changes and confirm that references to this resource are updated.
+}

From bee0dfbce834bbac2c500574549d5a6f10ead49e Mon Sep 17 00:00:00 2001
From: Leo Antoli <430982+lantoli@users.noreply.github.com>
Date: Wed, 10 Sep 2025 15:20:34 +0200
Subject: [PATCH 05/15] implement clu2adv

---
 internal/convert/clu2adv.go | 68 ++++++++++++++++++++++++++++++++++---
 1 file changed, 64 insertions(+), 4 deletions(-)

diff --git a/internal/convert/clu2adv.go b/internal/convert/clu2adv.go
index 620c990..e4d4a06 100644
--- a/internal/convert/clu2adv.go
+++ b/internal/convert/clu2adv.go
@@ -230,10 +230,70 @@ func fillRepSpecsWithDynamicBlock(resourceb *hclwrite.Body, root attrVals) (dyna
 	if err != nil {
 		return dynamicBlock{}, err
 	}
-	forSpec := hcl.TokensFromExpr(buildForExpr(nSpec, hcl.GetAttrExpr(dSpec.forEach), true))
-	forSpec = append(forSpec, dConfig.tokens...)
-	tokens := hcl.TokensFuncFlatten(forSpec)
-	dSpec.tokens = tokens
+
+	// Check if we have a dynamic region_configs block that was successfully processed
+	if dConfig.tokens != nil {
+		forSpec := hcl.TokensFromExpr(buildForExpr(nSpec, hcl.GetAttrExpr(dSpec.forEach), true))
+		forSpec = append(forSpec, dConfig.tokens...)
+		tokens := hcl.TokensFuncFlatten(forSpec)
+		dSpec.tokens = tokens
+		return dSpec, nil
+	}
+
+	// Handle static region_configs blocks inside dynamic replication_specs
+	specBody := dSpec.content.Body()
+
+	// Collect static region_configs blocks
+	staticConfigs := collectBlocks(specBody, nConfigSrc)
+	if len(staticConfigs) == 0 {
+		// No static blocks found, check if there's also no dynamic block
+		hasDynamicBlock := false
+		for _, block := range specBody.Blocks() {
+			if block.Type() == nDynamic && getResourceName(block) == nConfigSrc {
+				hasDynamicBlock = true
+				break
+			}
+		}
+		if !hasDynamicBlock {
+			return dynamicBlock{}, fmt.Errorf("replication_specs must have at least one regions_config")
+		}
+		// There's a dynamic block but fillConfigsWithDynamicRegion returned empty
+		return dynamicBlock{}, nil
+	}
+
+	repSpecb := hclwrite.NewEmptyFile().Body()
+
+	// Handle zone_name attribute
+	if zoneNameAttr := specBody.GetAttribute(nZoneName); zoneNameAttr != nil {
+		zoneNameExpr := transformReference(hcl.GetAttrExpr(zoneNameAttr), nRepSpecs, nSpec)
+		repSpecb.SetAttributeRaw(nZoneName, hcl.TokensFromExpr(zoneNameExpr))
+	}
+
+	// Process static region_configs blocks
+	var configs []*hclwrite.Body
+	for _, configBlock := range staticConfigs {
+		config, err := getRegionConfig(configBlock, root, false)
+		if err != nil {
+			return dynamicBlock{}, err
+		}
+		configs = append(configs, config)
+	}
+
+	configs = sortConfigsByPriority(configs)
+	repSpecb.SetAttributeRaw(nConfig, hcl.TokensArray(configs))
+
+	// Handle num_shards attribute
+	if numShardsAttr := specBody.GetAttribute(nNumShards); numShardsAttr != nil {
+		numShardsExpr := transformReference(hcl.GetAttrExpr(numShardsAttr), nRepSpecs, nSpec)
+		forSpec := hcl.TokensFromExpr(buildForExpr(nSpec, hcl.GetAttrExpr(dSpec.forEach), true))
+		innerFor := hcl.TokensFromExpr(buildForExpr("i", fmt.Sprintf("range(%s)", numShardsExpr), false))
+		innerFor = append(innerFor, hcl.TokensObject(repSpecb)...)
+		dSpec.tokens = hcl.TokensFuncFlatten(append(forSpec, hcl.EncloseBracketsNewLines(innerFor)...))
+	} else {
+		forSpec := hcl.TokensFromExpr(buildForExpr(nSpec, hcl.GetAttrExpr(dSpec.forEach), true))
+		dSpec.tokens = hcl.TokensFuncFlatten(append(forSpec, hcl.TokensArraySingle(repSpecb)...))
+	}
+
 	return dSpec, nil
 }

From 7584850dca646437cec73e837271bb052c98ecb5 Mon Sep 17 00:00:00 2001
From: Leo Antoli <430982+lantoli@users.noreply.github.com>
Date: Wed, 10 Sep 2025 18:19:33 +0200
Subject: [PATCH 06/15] clarify doc about Comments and formatting

---
 docs/command_adv2v2.md  | 9 +++++++++
 docs/command_clu2adv.md | 9 +++++++++
 2 files changed, 18 insertions(+)

diff --git a/docs/command_adv2v2.md b/docs/command_adv2v2.md
index 44f89d6..02afb04 100644
--- a/docs/command_adv2v2.md
+++ b/docs/command_adv2v2.md
@@ -26,6 +26,15 @@ atlas tf adv2v2 -f in.tf -o out.tf
 - `--replaceOutput` or `-r`: Overwrite the file at the output path if it already exists. You can also modify the input file in-place.
 - `--watch` or `-w`: Keep the plugin running and watching for changes in the input file
 
+## Comments and formatting
+
+During the conversion process, some formatting elements may not be preserved:
+- Some comments from the original resources may not be preserved in the output
+- Custom blank lines and spacing may be modified
+- The output file will have standardized formatting
+
+We recommend reviewing the converted output and re-adding any important comments or documentation that you need to maintain.
+
 ## Examples
 
 You can find [here](https://github.com/mongodb-labs/atlas-cli-plugin-terraform/tree/main/internal/convert/testdata/adv2v2) examples of input files (suffix .in.tf) and the corresponding output files (suffix .out.tf).
diff --git a/docs/command_clu2adv.md b/docs/command_clu2adv.md
index ce4fc41..ba10a2e 100644
--- a/docs/command_clu2adv.md
+++ b/docs/command_clu2adv.md
@@ -26,6 +26,15 @@ atlas tf clu2adv -f in.tf -o out.tf
 - `--watch` or `-w`: Keep the plugin running and watching for changes in the input file
 - `--includeMoved` or `-m`: Include the `moved blocks` in the output file
 
+## Comments and formatting
+
+During the conversion process, some formatting elements may not be preserved:
+- Some comments from the original resources may not be preserved in the output
+- Custom blank lines and spacing may be modified
+- The output file will have standardized formatting
+
+We recommend reviewing the converted output and re-adding any important comments or documentation that you need to maintain.
+
 ## Examples
 
 You can find [here](https://github.com/mongodb-labs/atlas-cli-plugin-terraform/tree/main/internal/convert/testdata/clu2adv) some examples of input files (suffix .in.tf) and the corresponding output files (suffix .out.tf).

From c0e0bacad13fa5fd2c87788ae7aa0ce596406546 Mon Sep 17 00:00:00 2001
From: Leo Antoli <430982+lantoli@users.noreply.github.com>
Date: Thu, 11 Sep 2025 09:31:58 +0200
Subject: [PATCH 07/15] refactors

---
 internal/convert/adv2v2.go  | 154 ++++++++++++----------
 internal/convert/clu2adv.go | 193 +++++++++++++++--------------------
 internal/convert/shared.go  |  70 +++++++++++
 3 files changed, 213 insertions(+), 204 deletions(-)

diff --git a/internal/convert/adv2v2.go b/internal/convert/adv2v2.go
index 6b01e07..862ea6a 100644
--- a/internal/convert/adv2v2.go
+++ b/internal/convert/adv2v2.go
@@ -18,20 +18,18 @@ func AdvancedClusterToV2(config []byte) ([]byte, error) {
 	}
 	parserb := parser.Body()
 	for _, block := range parserb.Blocks() {
-		updated, err := updateResource(block)
+		updated, err := processResource(block)
 		if err != nil {
 			return nil, err
 		}
-		if updated { // If the resource was converted, add a comment at the end so user knows the resource was updated
-			blockb := block.Body()
-			blockb.AppendNewline()
-			hcl.AppendComment(blockb, commentUpdatedBy)
+		if updated {
+			addConversionComments(block, true)
 		}
 	}
 	return parser.Bytes(), nil
 }
 
-func updateResource(resource *hclwrite.Block) (bool, error) {
+func processResource(resource *hclwrite.Block) (bool, error) {
 	if resource.Type() != resourceType || getResourceName(resource) != advCluster {
 		return false, nil
 	}
@@ -43,24 +41,17 @@ func updateResource(resource *hclwrite.Block) (bool, error) {
 		return false, nil
 	}
 	diskSizeGB, _ := hcl.PopAttr(resourceb, nDiskSizeGB, errRoot) // ok to fail as it's optional
-	if err := convertRepSpecs(resourceb, diskSizeGB); err != nil {
+	if err := processRepSpecs(resourceb, diskSizeGB); err != nil {
 		return false, err
 	}
-	if err := fillTagsLabelsOpt(resourceb, nTags); err != nil {
+	if err := processCommonOptionalBlocks(resourceb); err != nil {
 		return false, err
 	}
-	if err := fillTagsLabelsOpt(resourceb, nLabels); err != nil {
-		return false, err
-	}
-	fillAdvConfigOpt(resourceb)
-	fillBlockOpt(resourceb, nBiConnector)
-	fillBlockOpt(resourceb, nPinnedFCV)
-	fillBlockOpt(resourceb, nTimeouts)
 	return true, nil
 }
 
-func convertRepSpecs(resourceb *hclwrite.Body, diskSizeGB hclwrite.Tokens) error {
-	d, err := convertRepSpecsWithDynamicBlock(resourceb, diskSizeGB)
+func processRepSpecs(resourceb *hclwrite.Body, diskSizeGB hclwrite.Tokens) error {
+	d, err := processRepSpecsWithDynamicBlock(resourceb, diskSizeGB)
 	if err != nil {
 		return err
 	}
@@ -80,7 +71,7 @@ func convertRepSpecs(resourceb *hclwrite.Body, diskSizeGB hclwrite.Tokens) error
 		blockb := block.Body()
 		shardsAttr := blockb.GetAttribute(nNumShards)
 		blockb.RemoveAttribute(nNumShards)
-		dConfig, err := convertConfigsWithDynamicBlock(blockb, diskSizeGB, false)
+		dConfig, err := processConfigsWithDynamicBlock(blockb, diskSizeGB, false)
 		if err != nil {
 			return err
 		}
@@ -119,18 +110,16 @@ func convertRepSpecs(resourceb *hclwrite.Body, diskSizeGB hclwrite.Tokens) error
 	return nil
 }
 
-func convertRepSpecsWithDynamicBlock(resourceb *hclwrite.Body, diskSizeGB hclwrite.Tokens) (dynamicBlock, error) {
+func processRepSpecsWithDynamicBlock(resourceb *hclwrite.Body, diskSizeGB hclwrite.Tokens) (dynamicBlock, error) {
 	dSpec, err := getDynamicBlock(resourceb, nRepSpecs, true)
 	if err != nil || !dSpec.IsPresent() {
 		return dynamicBlock{}, err
 	}
 	transformReferences(dSpec.content.Body(), nRepSpecs, nSpec)
-	dConfig, err := convertConfigsWithDynamicBlock(dSpec.content.Body(), diskSizeGB, true)
+	dConfig, err := processConfigsWithDynamicBlock(dSpec.content.Body(), diskSizeGB, true)
 	if err != nil {
 		return dynamicBlock{}, err
 	}
-
-	// Check if we have a dynamic region_configs block that was successfully processed
 	if dConfig.tokens != nil {
 		forSpec := hcl.TokensFromExpr(buildForExpr(nSpec, hcl.GetAttrExpr(dSpec.forEach), true))
 		dSpec.tokens = hcl.TokensFuncFlatten(append(forSpec, dConfig.tokens...))
@@ -163,64 +152,28 @@ func convertRepSpecsWithDynamicBlock(resourceb *hclwrite.Body, diskSizeGB hclwri
 	repSpecb := hclwrite.NewEmptyFile().Body()
 
 	// Handle zone_name attribute
-	if zoneNameAttr := specBody.GetAttribute(nZoneName); zoneNameAttr != nil {
-		zoneNameExpr := transformReference(hcl.GetAttrExpr(zoneNameAttr), nRepSpecs, nSpec)
-		repSpecb.SetAttributeRaw(nZoneName, hcl.TokensFromExpr(zoneNameExpr))
-	}
+	handleZoneName(repSpecb, specBody, nRepSpecs, nSpec)
 
 	// Process static region_configs blocks
 	var configs []*hclwrite.Body
 	for _, configBlock := range staticConfigs {
 		configBlockb := configBlock.Body()
-		// Create a new body with sorted attributes
-		newConfigBody := hclwrite.NewEmptyFile().Body()
-
-		// Copy attributes in the expected order
-		attrs := configBlockb.Attributes()
-		// Priority, provider_name, region_name should come first
-		if priority := attrs["priority"]; priority != nil {
-			newConfigBody.SetAttributeRaw("priority", priority.Expr().BuildTokens(nil))
-		}
-		if provider := attrs["provider_name"]; provider != nil {
-			newConfigBody.SetAttributeRaw("provider_name", provider.Expr().BuildTokens(nil))
-		}
-		if region := attrs["region_name"]; region != nil {
-			newConfigBody.SetAttributeRaw("region_name", region.Expr().BuildTokens(nil))
-		}
-
-		// Process spec blocks and convert them to attributes
-		for _, block := range configBlockb.Blocks() {
-			blockType := block.Type()
-			blockBody := hclwrite.NewEmptyFile().Body()
-			copyAttributesSorted(blockBody, block.Body().Attributes())
-			if diskSizeGB != nil &&
-				(blockType == nElectableSpecs || blockType == nReadOnlySpecs || blockType == nAnalyticsSpecs) {
-				blockBody.SetAttributeRaw(nDiskSizeGB, diskSizeGB)
-			}
-			newConfigBody.SetAttributeRaw(blockType, hcl.TokensObject(blockBody))
-		}
-
+		newConfigBody := processConfigForDynamicBlock(configBlockb, diskSizeGB)
 		configs = append(configs, newConfigBody)
 	}
 
 	repSpecb.SetAttributeRaw(nConfig, hcl.TokensArray(configs))
 
 	// Handle num_shards attribute
-	if numShardsAttr := specBody.GetAttribute(nNumShards); numShardsAttr != nil {
-		numShardsExpr := transformReference(hcl.GetAttrExpr(numShardsAttr), nRepSpecs, nSpec)
-		forSpec := hcl.TokensFromExpr(buildForExpr(nSpec, hcl.GetAttrExpr(dSpec.forEach), true))
-		innerFor := hcl.TokensFromExpr(buildForExpr("i", fmt.Sprintf("range(%s)", numShardsExpr), false))
-		innerFor = append(innerFor, hcl.TokensObject(repSpecb)...)
-		dSpec.tokens = hcl.TokensFuncFlatten(append(forSpec, hcl.EncloseBracketsNewLines(innerFor)...))
-	} else {
-		forSpec := hcl.TokensFromExpr(buildForExpr(nSpec, hcl.GetAttrExpr(dSpec.forEach), true))
-		dSpec.tokens = hcl.TokensFuncFlatten(append(forSpec, hcl.TokensArraySingle(repSpecb)...))
-	}
+	numShardsAttr := specBody.GetAttribute(nNumShards)
+	forSpec := hcl.TokensFromExpr(buildForExpr(nSpec, hcl.GetAttrExpr(dSpec.forEach), true))
+	numShardsTokens := buildNumShardsTokens(numShardsAttr, repSpecb, nRepSpecs, nSpec)
+	dSpec.tokens = hcl.TokensFuncFlatten(append(forSpec, numShardsTokens...))
 
 	return dSpec, nil
 }
 
-func convertConfigsWithDynamicBlock(specbSrc *hclwrite.Body, diskSizeGB hclwrite.Tokens,
+func processConfigsWithDynamicBlock(specbSrc *hclwrite.Body, diskSizeGB hclwrite.Tokens,
 	insideDynamicRepSpec bool) (dynamicBlock, error) {
 	d, err := getDynamicBlock(specbSrc, nConfig, true)
 	if err != nil || !d.IsPresent() {
@@ -228,18 +181,7 @@ func convertConfigsWithDynamicBlock(specbSrc *hclwrite.Body, diskSizeGB hclwrite
 	}
 	configBody := d.content.Body()
 	transformReferences(configBody, getResourceName(d.block), nRegion)
-	regionConfigBody := hclwrite.NewEmptyFile().Body()
-	copyAttributesSorted(regionConfigBody, configBody.Attributes())
-	for _, block := range configBody.Blocks() {
-		blockType := block.Type()
-		blockBody := hclwrite.NewEmptyFile().Body()
-		copyAttributesSorted(blockBody, block.Body().Attributes())
-		if diskSizeGB != nil &&
-			(blockType == nElectableSpecs || blockType == nReadOnlySpecs || blockType == nAnalyticsSpecs) {
-			blockBody.SetAttributeRaw(nDiskSizeGB, diskSizeGB)
-		}
-		regionConfigBody.SetAttributeRaw(blockType, hcl.TokensObject(blockBody))
-	}
+	regionConfigBody := processConfigForDynamicBlock(configBody, diskSizeGB)
 	forEach := hcl.GetAttrExpr(d.forEach)
 	if insideDynamicRepSpec {
 		forEach = fmt.Sprintf("%s.%s", nSpec, nConfig)
@@ -251,18 +193,11 @@ func convertConfigsWithDynamicBlock(specbSrc *hclwrite.Body, diskSizeGB hclwrite
 		return d, nil
 	}
 	repSpecb := hclwrite.NewEmptyFile().Body()
-	if zoneNameAttr := specbSrc.GetAttribute(nZoneName); zoneNameAttr != nil {
-		zoneNameExpr := transformReference(hcl.GetAttrExpr(zoneNameAttr), nRepSpecs, nSpec)
-		repSpecb.SetAttributeRaw(nZoneName, hcl.TokensFromExpr(zoneNameExpr))
-	}
+	handleZoneName(repSpecb, specbSrc, nRepSpecs, nSpec)
 	repSpecb.SetAttributeRaw(nConfig, hcl.EncloseBracketsNewLines(regionTokens))
-	if numShardsAttr := specbSrc.GetAttribute(nNumShards); numShardsAttr != nil {
-		numShardsExpr := transformReference(hcl.GetAttrExpr(numShardsAttr), nRepSpecs, nSpec)
-		tokens := hcl.TokensFromExpr(buildForExpr("i", fmt.Sprintf("range(%s)", numShardsExpr), false))
-		tokens = append(tokens, hcl.TokensObject(repSpecb)...)
-		return dynamicBlock{tokens: hcl.EncloseBracketsNewLines(tokens)}, nil
-	}
-	return dynamicBlock{tokens: hcl.TokensArraySingle(repSpecb)}, nil
+	numShardsAttr := specbSrc.GetAttribute(nNumShards)
+	tokens := buildNumShardsTokens(numShardsAttr, repSpecb, nRepSpecs, nSpec)
+	return dynamicBlock{tokens: tokens}, nil
 }
 
 // hasExpectedBlocksAsAttributes checks if any of the expected block names
@@ -291,11 +226,44 @@ func copyAttributesSorted(targetBody *hclwrite.Body, sourceAttrs map[string]*hcl
 }
 
 func processAllSpecs(body *hclwrite.Body, diskSizeGB hclwrite.Tokens) {
-	fillSpecOpt(body, nElectableSpecs, diskSizeGB)
-	fillSpecOpt(body, nReadOnlySpecs, diskSizeGB)
-	fillSpecOpt(body, nAnalyticsSpecs, diskSizeGB)
-	fillSpecOpt(body, nAutoScaling, nil)
-	fillSpecOpt(body, nAnalyticsAutoScaling, nil)
+	// Process specs that need diskSizeGB
+	specsWithDisk := []string{nElectableSpecs, nReadOnlySpecs, nAnalyticsSpecs}
+	for _, spec := range specsWithDisk {
+		fillSpecOpt(body, spec, diskSizeGB)
+	}
+	// Process specs without diskSizeGB
+	specsWithoutDisk := []string{nAutoScaling, nAnalyticsAutoScaling}
+	for _, spec := range specsWithoutDisk {
+		fillSpecOpt(body, spec, nil)
+	}
+}
+
+func processConfigForDynamicBlock(configBlockb *hclwrite.Body, diskSizeGB hclwrite.Tokens) *hclwrite.Body {
+	// Create a new body with sorted attributes
+	newConfigBody := hclwrite.NewEmptyFile().Body()
+
+	// Copy attributes in the expected order (priority, provider_name, region_name first)
+	attrs := configBlockb.Attributes()
+	orderedAttrs := []string{nPriority, nProviderName, nRegionName}
+	for _, attrName := range orderedAttrs {
+		if attr := attrs[attrName]; attr != nil {
+			newConfigBody.SetAttributeRaw(attrName, attr.Expr().BuildTokens(nil))
+		}
+	}
+
+	// Process spec blocks and convert them to attributes
+	for _, block := range configBlockb.Blocks() {
+		blockType := block.Type()
+		blockBody := hclwrite.NewEmptyFile().Body()
+		copyAttributesSorted(blockBody, block.Body().Attributes())
+		if diskSizeGB != nil &&
+			(blockType == nElectableSpecs || blockType == nReadOnlySpecs || blockType == nAnalyticsSpecs) {
+			blockBody.SetAttributeRaw(nDiskSizeGB, diskSizeGB)
+		}
+		newConfigBody.SetAttributeRaw(blockType, hcl.TokensObject(blockBody))
+	}
+
+	return newConfigBody
 }
 
 func fillSpecOpt(resourceb *hclwrite.Body, name string, diskSizeGBTokens hclwrite.Tokens) {
diff --git a/internal/convert/clu2adv.go b/internal/convert/clu2adv.go
index e4d4a06..869e98e 100644
--- a/internal/convert/clu2adv.go
+++ b/internal/convert/clu2adv.go
@@ -3,10 +3,7 @@ package convert
 import (
 	"fmt"
 	"sort"
-	"strconv"
-	"strings"
 
-	"github.com/hashicorp/hcl/v2/hclsyntax"
 	"github.com/hashicorp/hcl/v2/hclwrite"
 	"github.com/mongodb-labs/atlas-cli-plugin-terraform/internal/hcl"
 	"github.com/zclconf/go-cty/cty"
@@ -43,10 +40,7 @@ func ClusterToAdvancedCluster(config []byte, includeMoved bool) ([]byte, error)
 		}
 		convertedDataSource := convertDataSource(block)
 		if convertedResource || convertedDataSource {
-			blockb := block.Body()
-			blockb.AppendNewline()
-			hcl.AppendComment(blockb, commentGeneratedBy)
-			hcl.AppendComment(blockb, commentConfirmReferences)
+			addConversionComments(block, false)
 		}
 	}
 	fillMovedBlocks(parserb, moveLabels)
@@ -64,9 +58,9 @@ func convertResource(block *hclwrite.Block) (bool, error) {
 	}
 	var err error
 	if isFreeTierCluster(blockb) {
-		err = fillFreeTierCluster(blockb)
+		err = processFreeTierCluster(blockb)
 	} else {
-		err = fillCluster(blockb)
+		err = processCluster(blockb)
 	}
 	if err != nil {
 		return false, err
@@ -114,18 +108,23 @@ func fillMovedBlocks(body *hclwrite.Body, moveLabels []string) {
 }
 
 // fillFreeTierCluster is the entry point to convert clusters in free tier
-func fillFreeTierCluster(resourceb *hclwrite.Body) error {
+func processFreeTierCluster(resourceb *hclwrite.Body) error {
 	resourceb.SetAttributeValue(nClusterType, cty.StringVal(valClusterType))
 	configb := hclwrite.NewEmptyFile().Body()
 	hcl.SetAttrInt(configb, nPriority, valMaxPriority)
-	if err := hcl.MoveAttr(resourceb, configb, nRegionNameSrc, nRegionName, errFreeCluster); err != nil {
-		return err
-	}
-	if err := hcl.MoveAttr(resourceb, configb, nProviderName, nProviderName, errFreeCluster); err != nil {
-		return err
+
+	// Move required attributes for free tier cluster
+	moveAttrs := []struct {
+		src, dst string
+	}{
+		{nRegionNameSrc, nRegionName},
+		{nProviderName, nProviderName},
+		{nBackingProviderName, nBackingProviderName},
 	}
-	if err := hcl.MoveAttr(resourceb, configb, nBackingProviderName, nBackingProviderName, errFreeCluster); err != nil {
-		return err
+	for _, attr := range moveAttrs {
+		if err := hcl.MoveAttr(resourceb, configb, attr.src, attr.dst, errFreeCluster); err != nil {
+			return err
+		}
 	}
 	electableSpecb := hclwrite.NewEmptyFile().Body()
 	if err := hcl.MoveAttr(resourceb, electableSpecb, nInstanceSizeSrc, nInstanceSize, errFreeCluster); err != nil {
@@ -139,7 +138,7 @@ func fillFreeTierCluster(resourceb *hclwrite.Body) error {
 }
 
 // fillCluster is the entry point to convert clusters with replications_specs (all but free tier)
-func fillCluster(resourceb *hclwrite.Body) error {
+func processCluster(resourceb *hclwrite.Body) error {
 	root, errRoot := popRootAttrs(resourceb)
 	if errRoot != nil {
 		return errRoot
@@ -147,24 +146,14 @@ func fillCluster(resourceb *hclwrite.Body) error {
 	resourceb.RemoveAttribute(nNumShards) // num_shards in root is not relevant, only in replication_specs
 	// ok to fail as cloud_backup is optional
 	_ = hcl.MoveAttr(resourceb, resourceb, nCloudBackup, nBackupEnabled, errRepSpecs)
-	if err := fillRepSpecs(resourceb, root); err != nil {
-		return err
-	}
-	if err := fillTagsLabelsOpt(resourceb, nTags); err != nil {
+	if err := processRepSpecsCluster(resourceb, root); err != nil {
 		return err
 	}
-	if err := fillTagsLabelsOpt(resourceb, nLabels); err != nil {
-		return err
-	}
-	fillAdvConfigOpt(resourceb)
-	fillBlockOpt(resourceb, nBiConnector)
-	fillBlockOpt(resourceb, nPinnedFCV)
-	fillBlockOpt(resourceb, nTimeouts)
-	return nil
+	return processCommonOptionalBlocks(resourceb)
 }
 
-func fillRepSpecs(resourceb *hclwrite.Body, root attrVals) error {
-	d, err := fillRepSpecsWithDynamicBlock(resourceb, root)
+func processRepSpecsCluster(resourceb *hclwrite.Body, root attrVals) error {
+	d, err := processRepSpecsClusterWithDynamicBlock(resourceb, root)
 	if err != nil {
 		return err
 	}
@@ -177,7 +166,7 @@ func fillRepSpecs(resourceb *hclwrite.Body, root attrVals) error {
 	if len(repSpecBlocks) == 0 {
 		return fmt.Errorf("must have at least one replication_specs")
 	}
-	dConfig, err := fillConfigsWithDynamicRegion(repSpecBlocks[0].Body(), root, false)
+	dConfig, err := processConfigsWithDynamicRegion(repSpecBlocks[0].Body(), root, false)
 	if err != nil {
 		return err
 	}
@@ -196,7 +185,7 @@ func fillRepSpecs(resourceb *hclwrite.Body, root attrVals) error {
 		if shardsAttr == nil {
 			return fmt.Errorf("%s: %s not found", errRepSpecs, nNumShards)
 		}
-		if errConfig := fillRegionConfigs(specb, specbSrc, root); errConfig != nil {
+		if errConfig := processRegionConfigs(specb, specbSrc, root); errConfig != nil {
 			return errConfig
 		}
 		if hasVariableShards {
@@ -220,13 +209,13 @@ func fillRepSpecs(resourceb *hclwrite.Body, root attrVals) error {
 }
 
 // fillRepSpecsWithDynamicBlock used for dynamic blocks in replication_specs
-func fillRepSpecsWithDynamicBlock(resourceb *hclwrite.Body, root attrVals) (dynamicBlock, error) {
+func processRepSpecsClusterWithDynamicBlock(resourceb *hclwrite.Body, root attrVals) (dynamicBlock, error) {
 	dSpec, err := getDynamicBlock(resourceb, nRepSpecs, true)
 	if err != nil || !dSpec.IsPresent() {
 		return dynamicBlock{}, err
 	}
 	transformReferences(dSpec.content.Body(), nRepSpecs, nSpec)
-	dConfig, err := fillConfigsWithDynamicRegion(dSpec.content.Body(), root, true)
+	dConfig, err := processConfigsWithDynamicRegion(dSpec.content.Body(), root, true)
 	if err != nil {
 		return dynamicBlock{}, err
 	}
@@ -264,10 +253,7 @@ func fillRepSpecsWithDynamicBlock(resourceb *hclwrite.Body, root attrVals) (dyna
 	repSpecb := hclwrite.NewEmptyFile().Body()
 
 	// Handle zone_name attribute
-	if zoneNameAttr := specBody.GetAttribute(nZoneName); zoneNameAttr != nil {
-		zoneNameExpr := transformReference(hcl.GetAttrExpr(zoneNameAttr), nRepSpecs, nSpec)
-		repSpecb.SetAttributeRaw(nZoneName, hcl.TokensFromExpr(zoneNameExpr))
-	}
+	handleZoneName(repSpecb, specBody, nRepSpecs, nSpec)
 
 	// Process static region_configs blocks
 	var configs []*hclwrite.Body
@@ -283,22 +269,17 @@ func fillRepSpecsWithDynamicBlock(resourceb *hclwrite.Body, root attrVals) (dyna
 	repSpecb.SetAttributeRaw(nConfig, hcl.TokensArray(configs))
 
 	// Handle num_shards attribute
-	if numShardsAttr := specBody.GetAttribute(nNumShards); numShardsAttr != nil {
-		numShardsExpr := transformReference(hcl.GetAttrExpr(numShardsAttr), nRepSpecs, nSpec)
-		forSpec := hcl.TokensFromExpr(buildForExpr(nSpec, hcl.GetAttrExpr(dSpec.forEach), true))
-		innerFor := hcl.TokensFromExpr(buildForExpr("i", fmt.Sprintf("range(%s)", numShardsExpr), false))
-		innerFor = append(innerFor, hcl.TokensObject(repSpecb)...)
-		dSpec.tokens = hcl.TokensFuncFlatten(append(forSpec, hcl.EncloseBracketsNewLines(innerFor)...))
-	} else {
-		forSpec := hcl.TokensFromExpr(buildForExpr(nSpec, hcl.GetAttrExpr(dSpec.forEach), true))
-		dSpec.tokens = hcl.TokensFuncFlatten(append(forSpec, hcl.TokensArraySingle(repSpecb)...))
-	}
+	numShardsAttr := specBody.GetAttribute(nNumShards)
+	forSpec := hcl.TokensFromExpr(buildForExpr(nSpec, hcl.GetAttrExpr(dSpec.forEach), true))
+	numShardsTokens := buildNumShardsTokens(numShardsAttr, repSpecb, nRepSpecs, nSpec)
+	dSpec.tokens = hcl.TokensFuncFlatten(append(forSpec, numShardsTokens...))
 
 	return dSpec, nil
 }
 
 // fillConfigsWithDynamicRegion is used for dynamic blocks in region_configs
-func fillConfigsWithDynamicRegion(specbSrc *hclwrite.Body, root attrVals, changeReferences bool) (dynamicBlock, error) {
+func processConfigsWithDynamicRegion(specbSrc *hclwrite.Body, root attrVals,
+	changeReferences bool) (dynamicBlock, error) {
 	d, err := getDynamicBlock(specbSrc, nConfigSrc, true)
 	if err != nil || !d.IsPresent() {
 		return dynamicBlock{}, err
 	}
@@ -330,7 +311,7 @@ func fillConfigsWithDynamicRegion(specbSrc *hclwrite.Body, root attrVals, change
 	return d, nil
 }
 
-func fillRegionConfigs(specb, specbSrc *hclwrite.Body, root attrVals) error {
+func processRegionConfigs(specb, specbSrc *hclwrite.Body, root attrVals) error {
 	var configs []*hclwrite.Body
 	for {
 		configSrc := specbSrc.FirstMatchingBlock(nConfigSrc, nil)
@@ -361,14 +342,19 @@ func getRegionConfig(configSrc *hclwrite.Block, root attrVals, isDynamicBlock bo
 	if err := hcl.MoveAttr(configSrc.Body(), fileb, nPriority, nPriority, errRepSpecs); err != nil {
 		return nil, err
 	}
-	if electable, _ := getSpec(configSrc, nElectableNodes, root, isDynamicBlock); electable != nil {
-		fileb.SetAttributeRaw(nElectableSpecs, electable)
-	}
-	if readOnly, _ := getSpec(configSrc, nReadOnlyNodes, root, isDynamicBlock); readOnly != nil {
-		fileb.SetAttributeRaw(nReadOnlySpecs, readOnly)
-	}
-	if analytics, _ := getSpec(configSrc, nAnalyticsNodes, root, isDynamicBlock); analytics != nil {
-		fileb.SetAttributeRaw(nAnalyticsSpecs, analytics)
+	// Process all spec types
+	specMappings := []struct {
+		nodeName string
+		specName string
+	}{
+		{nElectableNodes, nElectableSpecs},
+		{nReadOnlyNodes, nReadOnlySpecs},
+		{nAnalyticsNodes, nAnalyticsSpecs},
+	}
+	for _, mapping := range specMappings {
+		if spec, _ := getSpec(configSrc, mapping.nodeName, root, isDynamicBlock); spec != nil {
+			fileb.SetAttributeRaw(mapping.specName, spec)
+		}
 	}
 	if autoScaling := getAutoScalingOpt(root.opt); autoScaling != nil {
 		fileb.SetAttributeRaw(nAutoScaling, autoScaling)
@@ -449,11 +435,6 @@ func getResourceLabel(resource *hclwrite.Block) string {
 	return labels[1]
 }
 
-func replaceDynamicBlockExpr(attr *hclwrite.Attribute, blockName, attrName string) string {
-	expr := hcl.GetAttrExpr(attr)
-	return strings.ReplaceAll(expr, fmt.Sprintf("%s.%s", blockName, attrName), attrName)
-}
-
 // getDynamicBlockRegionArray returns the region array for a dynamic block in replication_specs.
 // e.g. [ for region in var.replication_specs.regions_config : { ... } if priority == region.priority ]
 func getDynamicBlockRegionArray(forEach string, configSrc *hclwrite.Block, root attrVals) (hclwrite.Tokens, error) {
@@ -486,55 +467,45 @@ func sortConfigsByPriority(configs []*hclwrite.Body) []*hclwrite.Body {
 	return configs
 }
 
-func setKeyValue(body *hclwrite.Body, key, value *hclwrite.Attribute) {
-	keyStr, err := hcl.GetAttrString(key)
-	if err == nil {
-		if !hclsyntax.ValidIdentifier(keyStr) {
-			// wrap in quotes so invalid identifiers (e.g. with blanks) can be used as attribute names
-			keyStr = strconv.Quote(keyStr)
-		}
-	} else {
-		keyStr = strings.TrimSpace(string(key.Expr().BuildTokens(nil).Bytes()))
-		keyStr = "(" + keyStr + ")" // wrap in parentheses so non-literal expressions can be used as attribute names
-	}
-	body.SetAttributeRaw(keyStr, value.Expr().BuildTokens(nil))
-}
-
 // popRootAttrs deletes the attributes common to all replication_specs/regions_config and returns them.
 func popRootAttrs(body *hclwrite.Body) (attrVals, error) {
-	var (
-		reqNames = []string{
-			nProviderName,
-			nInstanceSizeSrc,
-		}
-		optNames = []string{
-			nElectableNodes,
-			nReadOnlyNodes,
-			nAnalyticsNodes,
-			nDiskSizeGB,
-			nDiskGBEnabledSrc,
-			nComputeEnabledSrc,
-			nComputeMinInstanceSizeSrc,
-			nComputeMaxInstanceSizeSrc,
-			nComputeScaleDownEnabledSrc,
-			nEBSVolumeTypeSrc,
-			nDiskIOPSSrc,
-		}
-		req = make(map[string]hclwrite.Tokens)
-		opt = make(map[string]hclwrite.Tokens)
-	)
-	for _, name := range reqNames {
-		tokens, err := hcl.PopAttr(body, name, errRepSpecs)
-		if err != nil {
-			return attrVals{}, err
-		}
-		req[name] = tokens
-	}
-	for _, name := range optNames {
-		tokens, _ := hcl.PopAttr(body, name, errRepSpecs)
-		if tokens != nil {
-			opt[name] = tokens
+	type attrConfig struct {
+		name     string
+		required bool
+	}
+
+	attrs := []attrConfig{
+		// Required attributes
+		{nProviderName, true},
+		{nInstanceSizeSrc, true},
+		// Optional attributes
+		{nElectableNodes, false},
+		{nReadOnlyNodes, false},
+		{nAnalyticsNodes, false},
+		{nDiskSizeGB, false},
+		{nDiskGBEnabledSrc, false},
+		{nComputeEnabledSrc, false},
+		{nComputeMinInstanceSizeSrc, false},
+		{nComputeMaxInstanceSizeSrc, false},
+		{nComputeScaleDownEnabledSrc, false},
+		{nEBSVolumeTypeSrc, false},
+		{nDiskIOPSSrc, false},
+	}
+
+	req := make(map[string]hclwrite.Tokens)
+	opt := make(map[string]hclwrite.Tokens)
+
+	for _, attr := range attrs {
+		tokens, err := hcl.PopAttr(body, attr.name, errRepSpecs)
+		if attr.required {
+			if err != nil {
+				return attrVals{}, err
+			}
+			req[attr.name] = tokens
+		} else if tokens != nil {
+			opt[attr.name] = tokens
 		}
 	}
+
 	return attrVals{req: req, opt: opt}, nil
 }
diff --git a/internal/convert/shared.go b/internal/convert/shared.go
index ffdff9f..76c846f 100644
--- a/internal/convert/shared.go
+++ b/internal/convert/shared.go
@@ -4,8 +4,10 @@ import (
 	"errors"
 	"fmt"
 	"slices"
+	"strconv"
 	"strings"
 
+	"github.com/hashicorp/hcl/v2/hclsyntax"
 	"github.com/hashicorp/hcl/v2/hclwrite"
 	"github.com/mongodb-labs/atlas-cli-plugin-terraform/internal/hcl"
 )
@@ -14,6 +16,18 @@ var (
 	errDynamicBlockAlone = errors.New("dynamic block must be the only block, see docs for more information")
 )
 
+// addConversionComments adds appropriate comments to a converted block
+func addConversionComments(block *hclwrite.Block, isUpdatedComment bool) {
+	blockb := block.Body()
+	blockb.AppendNewline()
+	if isUpdatedComment {
+		hcl.AppendComment(blockb, commentUpdatedBy)
+	} else {
+		hcl.AppendComment(blockb, commentGeneratedBy)
+		hcl.AppendComment(blockb, commentConfirmReferences)
+	}
+}
+
 // hasVariableNumShards checks if any block has a variable (non-literal) num_shards attribute
 func hasVariableNumShards(blocks []*hclwrite.Block) bool {
 	for _, block := range blocks {
@@ -163,6 +177,23 @@ func fillAdvConfigOpt(resourceb *hclwrite.Body) {
 	fillBlockOpt(resourceb, nAdvConfig)
 }
 
+// processCommonOptionalBlocks processes tags, labels, and other optional blocks
+// This function is used by both adv2v2 and clu2adv conversions
+func processCommonOptionalBlocks(resourceb *hclwrite.Body) error {
+	// Process tags and labels
+	for _, name := range []string{nTags, nLabels} {
+		if err := fillTagsLabelsOpt(resourceb, name); err != nil {
+			return err
+		}
+	}
+	// Process optional configuration blocks
+	fillAdvConfigOpt(resourceb)
+	for _, name := range []string{nBiConnector, nPinnedFCV, nTimeouts} {
+		fillBlockOpt(resourceb, name)
+	}
+	return nil
+}
+
 // buildForExpr builds a for expression with the given variable and collection
 func buildForExpr(varName, collection string, trailingSpace bool) string {
 	expr := fmt.Sprintf("for %s in %s :", varName, collection)
@@ -172,6 +203,26 @@ func buildForExpr(varName, collection string, trailingSpace bool) string {
 	return expr
 }
 
+// handleZoneName adds zone_name attribute to the body if present in source
+func handleZoneName(targetBody, sourceBody *hclwrite.Body, blockName, varName string) {
+	if zoneNameAttr := sourceBody.GetAttribute(nZoneName); zoneNameAttr != nil {
+		zoneNameExpr := transformReference(hcl.GetAttrExpr(zoneNameAttr), blockName, varName)
+		targetBody.SetAttributeRaw(nZoneName, hcl.TokensFromExpr(zoneNameExpr))
+	}
+}
+
+// buildNumShardsTokens builds tokens for handling num_shards with for loops
+func buildNumShardsTokens(numShardsAttr *hclwrite.Attribute, repSpecb *hclwrite.Body,
+	blockName, varName string) hclwrite.Tokens {
+	if numShardsAttr == nil {
+		return hcl.TokensArraySingle(repSpecb)
+	}
+	numShardsExpr := transformReference(hcl.GetAttrExpr(numShardsAttr), blockName, varName)
+	tokens := hcl.TokensFromExpr(buildForExpr("i", fmt.Sprintf("range(%s)", numShardsExpr), false))
+	tokens = append(tokens, hcl.TokensObject(repSpecb)...)
+	return hcl.EncloseBracketsNewLines(tokens)
+}
+
 func fillTagsLabelsOpt(resourceb *hclwrite.Body, name string) error {
 	tokensDynamic, err := extractTagsLabelsDynamicBlock(resourceb, name)
 	if err != nil {
@@ -235,3 +286,22 @@ func extractTagsLabelsIndividual(resourceb *hclwrite.Body, name string) (hclwrit
 	}
 	return hcl.TokensObject(fileb), nil
 }
+
+func replaceDynamicBlockExpr(attr *hclwrite.Attribute, blockName, attrName string) string {
+	expr := hcl.GetAttrExpr(attr)
+	return strings.ReplaceAll(expr, fmt.Sprintf("%s.%s", blockName, attrName), attrName)
+}
+
+func setKeyValue(body *hclwrite.Body, key, value *hclwrite.Attribute) {
+	keyStr, err := hcl.GetAttrString(key)
+	if err == nil {
+		if !hclsyntax.ValidIdentifier(keyStr) {
+			// wrap in quotes so invalid identifiers (e.g. with blanks) can be used as attribute names
+			keyStr = strconv.Quote(keyStr)
+		}
+	} else {
+		keyStr = strings.TrimSpace(string(key.Expr().BuildTokens(nil).Bytes()))
+		keyStr = "(" + keyStr + ")" // wrap in parentheses so non-literal expressions can be used as attribute names
+	}
+	body.SetAttributeRaw(keyStr, value.Expr().BuildTokens(nil))
+}

From 613f19443ac3388a60e2827963b6a23533e321d3 Mon Sep 17 00:00:00 2001
From: Leo Antoli <430982+lantoli@users.noreply.github.com>
Date: Thu, 11 Sep 2025 09:39:02 +0200
Subject: [PATCH 08/15] remove unneeded checks

---
 internal/convert/adv2v2.go  | 28 ----------------------------
 internal/convert/clu2adv.go | 28 ----------------------------
 2 files changed, 56 deletions(-)

diff --git a/internal/convert/adv2v2.go b/internal/convert/adv2v2.go
index 862ea6a..259a154 100644
--- a/internal/convert/adv2v2.go
+++ b/internal/convert/adv2v2.go
@@ -128,48 +128,20 @@ func processRepSpecsWithDynamicBlock(resourceb *hclwrite.Body, diskSizeGB hclwri
 
 	// Handle static region_configs blocks inside dynamic replication_specs
 	specBody := dSpec.content.Body()
-
-	// Collect static region_configs blocks
 	staticConfigs := collectBlocks(specBody, nConfig)
-	if len(staticConfigs) == 0 {
-		// No static blocks found, this might be an error case
-		// Check if there's also no dynamic block (which would have been handled above)
-		hasDynamicBlock := false
-		for _, block := range specBody.Blocks() {
-			if block.Type() == nDynamic && getResourceName(block) == nConfig {
-				hasDynamicBlock = true
-				break
-			}
-		}
-		if !hasDynamicBlock {
-			return dynamicBlock{}, fmt.Errorf("replication_specs must have at least one region_configs")
-		}
-		// There's a dynamic block but convertConfigsWithDynamicBlock returned empty
-		// This shouldn't happen, but return the error from that function
-		return dynamicBlock{}, nil
-	}
-
 	repSpecb := hclwrite.NewEmptyFile().Body()
-
-	// Handle zone_name attribute
 	handleZoneName(repSpecb, specBody, nRepSpecs, nSpec)
-
-	// Process static region_configs blocks
 	var configs []*hclwrite.Body
 	for _, configBlock := range staticConfigs {
 		configBlockb := configBlock.Body()
 		newConfigBody := processConfigForDynamicBlock(configBlockb, diskSizeGB)
 		configs = append(configs, newConfigBody)
 	}
-
 	repSpecb.SetAttributeRaw(nConfig, hcl.TokensArray(configs))
-
-	// Handle num_shards attribute
 	numShardsAttr := specBody.GetAttribute(nNumShards)
 	forSpec := hcl.TokensFromExpr(buildForExpr(nSpec, hcl.GetAttrExpr(dSpec.forEach), true))
 	numShardsTokens := buildNumShardsTokens(numShardsAttr, repSpecb, nRepSpecs, nSpec)
 	dSpec.tokens = hcl.TokensFuncFlatten(append(forSpec, numShardsTokens...))
-
 	return dSpec, nil
 }
diff --git a/internal/convert/clu2adv.go b/internal/convert/clu2adv.go
index 869e98e..11a08ca 100644
--- a/internal/convert/clu2adv.go
+++ b/internal/convert/clu2adv.go
@@ -219,8 +219,6 @@ func processRepSpecsClusterWithDynamicBlock(resourceb *hclwrite.Body, root attrV
 	if err != nil {
 		return dynamicBlock{}, err
 	}
-
-	// Check if we have a dynamic region_configs block that was successfully processed
 	if dConfig.tokens != nil {
 		forSpec := hcl.TokensFromExpr(buildForExpr(nSpec, hcl.GetAttrExpr(dSpec.forEach), true))
 		forSpec = append(forSpec, dConfig.tokens...)
@@ -231,31 +229,9 @@ func processRepSpecsClusterWithDynamicBlock(resourceb *hclwrite.Body, root attrV
 
 	// Handle static region_configs blocks inside dynamic replication_specs
 	specBody := dSpec.content.Body()
-
-	// Collect static region_configs blocks
 	staticConfigs := collectBlocks(specBody, nConfigSrc)
-	if len(staticConfigs) == 0 {
-		// No static blocks found, check if there's also no dynamic block
-		hasDynamicBlock := false
-		for _, block := range specBody.Blocks() {
-			if block.Type() == nDynamic && getResourceName(block) == nConfigSrc {
-				hasDynamicBlock = true
-				break
-			}
-		}
-		if !hasDynamicBlock {
-			return dynamicBlock{}, fmt.Errorf("replication_specs must have at least one regions_config")
-		}
-		// There's a dynamic block but fillConfigsWithDynamicRegion returned empty
-		return dynamicBlock{}, nil
-	}
-
 	repSpecb := hclwrite.NewEmptyFile().Body()
-
-	// Handle zone_name attribute
 	handleZoneName(repSpecb, specBody, nRepSpecs, nSpec)
-
-	// Process static region_configs blocks
 	var configs []*hclwrite.Body
 	for _, configBlock := range staticConfigs {
 		config, err := getRegionConfig(configBlock, root, false)
@@ -264,16 +240,12 @@ func processRepSpecsClusterWithDynamicBlock(resourceb *hclwrite.Body, root attrV
 		}
 		configs = append(configs, config)
 	}
-
 	configs = sortConfigsByPriority(configs)
 	repSpecb.SetAttributeRaw(nConfig, hcl.TokensArray(configs))
-
-	// Handle num_shards attribute
 	numShardsAttr := specBody.GetAttribute(nNumShards)
 	forSpec := hcl.TokensFromExpr(buildForExpr(nSpec, hcl.GetAttrExpr(dSpec.forEach), true))
 	numShardsTokens := buildNumShardsTokens(numShardsAttr, repSpecb, nRepSpecs, nSpec)
 	dSpec.tokens = hcl.TokensFuncFlatten(append(forSpec, numShardsTokens...))
-
 	return dSpec, nil
 }

From 12ecf5303d0db4f76b230426aa1d025048c45792 Mon Sep 17 00:00:00 2001
From: Leo Antoli <430982+lantoli@users.noreply.github.com>
Date: Thu, 11 Sep 2025 09:42:37 +0200
Subject: [PATCH 09/15] refactor comments

---
 internal/convert/adv2v2.go | 8 --------
 internal/convert/shared.go | 4 ++--
 2 files changed, 2 insertions(+), 10 deletions(-)

diff --git a/internal/convert/adv2v2.go b/internal/convert/adv2v2.go
index 259a154..decde00 100644
--- a/internal/convert/adv2v2.go
+++ b/internal/convert/adv2v2.go
@@ -198,12 +198,10 @@ func copyAttributesSorted(targetBody *hclwrite.Body, sourceAttrs map[string]*hcl
 }
 
 func processAllSpecs(body *hclwrite.Body, diskSizeGB hclwrite.Tokens) {
-	// Process specs that need diskSizeGB
 	specsWithDisk := []string{nElectableSpecs, nReadOnlySpecs, nAnalyticsSpecs}
 	for _, spec := range specsWithDisk {
 		fillSpecOpt(body, spec, diskSizeGB)
 	}
-	// Process specs without diskSizeGB
 	specsWithoutDisk := []string{nAutoScaling, nAnalyticsAutoScaling}
 	for _, spec := range specsWithoutDisk {
 		fillSpecOpt(body, spec, nil)
@@ -211,10 +209,7 @@ func processAllSpecs(body *hclwrite.Body, diskSizeGB hclwrite.Tokens) {
 }
 
 func processConfigForDynamicBlock(configBlockb *hclwrite.Body, diskSizeGB hclwrite.Tokens) *hclwrite.Body {
-	// Create a new body with sorted attributes
 	newConfigBody := hclwrite.NewEmptyFile().Body()
-
-	// Copy attributes in the expected order (priority, provider_name, region_name first)
 	attrs := configBlockb.Attributes()
 	orderedAttrs := []string{nPriority, nProviderName, nRegionName}
 	for _, attrName := range orderedAttrs {
@@ -222,8 +217,6 @@ func processConfigForDynamicBlock(configBlockb *hclwrite.Body, diskSizeGB hclwri
 			newConfigBody.SetAttributeRaw(attrName, attr.Expr().BuildTokens(nil))
 		}
 	}
-
-	// Process spec blocks and convert them to attributes
 	for _, block := range configBlockb.Blocks() {
 		blockType := block.Type()
 		blockBody := hclwrite.NewEmptyFile().Body()
@@ -234,7 +227,6 @@ func processConfigForDynamicBlock(configBlockb *hclwrite.Body, diskSizeGB hclwri
 		}
 		newConfigBody.SetAttributeRaw(blockType, hcl.TokensObject(blockBody))
 	}
-
 	return newConfigBody
 }
 
 func fillSpecOpt(resourceb *hclwrite.Body, name string, diskSizeGBTokens hclwrite.Tokens) {
diff --git a/internal/convert/shared.go b/internal/convert/shared.go
index 76c846f..4a69d9f 100644
--- a/internal/convert/shared.go
+++ b/internal/convert/shared.go
@@ -296,12 +296,12 @@ func setKeyValue(body *hclwrite.Body, key, value *hclwrite.Attribute) {
 	keyStr, err := hcl.GetAttrString(key)
 	if err == nil {
 		if !hclsyntax.ValidIdentifier(keyStr) {
-			// wrap in quotes so invalid identifiers (e.g. with blanks) can be used as attribute names
+			// Wrap in quotes so invalid identifiers (e.g. with blanks) can be used as attribute names
 			keyStr = strconv.Quote(keyStr)
 		}
 	} else {
 		keyStr = strings.TrimSpace(string(key.Expr().BuildTokens(nil).Bytes()))
-		keyStr = "(" + keyStr + ")" // wrap in parentheses so non-literal expressions can be used as attribute names
+		keyStr = "(" + keyStr + ")" // Wrap in parentheses so non-literal expressions can be used as attribute names
 	}
 	body.SetAttributeRaw(keyStr, value.Expr().BuildTokens(nil))
 }

From 5b4a720a17429aa8c8f2f64867073a17edf104b3 Mon Sep 17 00:00:00 2001
From: Leo Antoli <430982+lantoli@users.noreply.github.com>
Date: Thu, 11 Sep 2025 09:53:56 +0200
Subject: [PATCH 10/15] fix e2e test

---
 internal/convert/adv2v2.go  | 2 +-
 internal/convert/clu2adv.go | 2 +-
 internal/convert/shared.go  | 4 ++--
 test/e2e/e2e_helper.go      | 2 ++
 4 files changed, 6 insertions(+), 4 deletions(-)

diff --git a/internal/convert/adv2v2.go b/internal/convert/adv2v2.go
index decde00..8145c25 100644
--- a/internal/convert/adv2v2.go
+++ b/internal/convert/adv2v2.go
@@ -23,7 +23,7 @@ func AdvancedClusterToV2(config []byte) ([]byte, error) {
 			return nil, err
 		}
 		if updated {
-			addConversionComments(block, true)
+			addComments(block, true)
 		}
 	}
 	return parser.Bytes(), nil
diff --git a/internal/convert/clu2adv.go b/internal/convert/clu2adv.go
index 11a08ca..7b113f1 100644
--- a/internal/convert/clu2adv.go
+++ b/internal/convert/clu2adv.go
@@ -40,7 +40,7 @@ func ClusterToAdvancedCluster(config []byte, includeMoved bool) ([]byte, error)
 		}
 		convertedDataSource := convertDataSource(block)
 		if convertedResource || convertedDataSource {
-			addConversionComments(block, false)
+			addComments(block, false)
 		}
 	}
 	fillMovedBlocks(parserb, moveLabels)
diff --git a/internal/convert/shared.go b/internal/convert/shared.go
index 4a69d9f..4061800 100644
--- a/internal/convert/shared.go
+++ b/internal/convert/shared.go
@@ -16,8 +16,8 @@ var (
 	errDynamicBlockAlone = errors.New("dynamic block must be the only block, see docs for more information")
 )
 
-// addConversionComments adds appropriate comments to a converted block
-func addConversionComments(block *hclwrite.Block, isUpdatedComment bool) {
+// addComments adds appropriate comments to a converted block
+func addComments(block *hclwrite.Block, isUpdatedComment bool) {
 	blockb := block.Body()
 	blockb.AppendNewline()
 	if isUpdatedComment {
diff --git a/test/e2e/e2e_helper.go b/test/e2e/e2e_helper.go
index 2749d0a..cd6f7af 100644
--- a/test/e2e/e2e_helper.go
+++ b/test/e2e/e2e_helper.go
@@ -14,6 +14,8 @@ import (
 func RunTF(args ...string) (string, error) {
 	args = append([]string{"tf"}, args...)
 	cmd := exec.CommandContext(context.Background(), "atlas", args...)
+	// Silence the storage warning that appears in CI environments
+	cmd.Env = append(os.Environ(), "ATLAS_SILENCE_STORAGE_WARNING=true")
 	resp, err := cmd.CombinedOutput()
 	return string(resp), err
 }

From 6dbff357385aa9fe2bafd0286ff7d93dd3b8e7f1 Mon Sep 17 00:00:00 2001
From: Leo Antoli <430982+lantoli@users.noreply.github.com>
Date: Thu, 11 Sep 2025 10:05:09 +0200
Subject: [PATCH 11/15] silent warning

---
 test/e2e/e2e_helper.go | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/test/e2e/e2e_helper.go b/test/e2e/e2e_helper.go
index cd6f7af..fedd794 100644
--- a/test/e2e/e2e_helper.go
+++ b/test/e2e/e2e_helper.go
@@ -12,10 +12,11 @@ import (
 )
 
 func RunTF(args ...string) (string, error) {
+	// Ensure Atlas CLI storage warning is silenced before running tests as it is not enabled in GitHub Actions
+	exec.Command("atlas", "config", "set", "silence_storage_warning", "true").Run()
+
 	args = append([]string{"tf"}, args...)
 	cmd := exec.CommandContext(context.Background(), "atlas", args...)
-	// Silence the storage warning that appears in CI environments
-	cmd.Env = append(os.Environ(), "ATLAS_SILENCE_STORAGE_WARNING=true")
 	resp, err := cmd.CombinedOutput()
 	return string(resp), err
 }

From 22f220e59eff7b17c504e3c39a7b6f205ea21cd1 Mon Sep 17 00:00:00 2001
From: Leo Antoli <430982+lantoli@users.noreply.github.com>
Date: Thu, 11 Sep 2025 10:07:50 +0200
Subject: [PATCH 12/15] fix linter

---
 test/e2e/e2e_helper.go | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/test/e2e/e2e_helper.go b/test/e2e/e2e_helper.go
index fedd794..f7a0af2 100644
--- a/test/e2e/e2e_helper.go
+++ b/test/e2e/e2e_helper.go
@@ -12,11 +12,13 @@ import (
 )
 
 func RunTF(args ...string) (string, error) {
+	ctx := context.Background()
+
 	// Ensure Atlas CLI storage warning is silenced before running tests as it is not enabled in GitHub Actions
-	exec.Command("atlas", "config", "set", "silence_storage_warning", "true").Run()
+	_ = exec.CommandContext(ctx, "atlas", "config", "set", "silence_storage_warning", "true").Run()
 
 	args = append([]string{"tf"}, args...)
-	cmd := exec.CommandContext(context.Background(), "atlas", args...)
+	cmd := exec.CommandContext(ctx, "atlas", args...)
 	resp, err := cmd.CombinedOutput()
 	return string(resp), err
 }

From b5e661dfc466772ce23b0389ec91f574da6e2bc2 Mon Sep 17 00:00:00 2001
From: Leo Antoli <430982+lantoli@users.noreply.github.com>
Date: Thu, 11 Sep 2025 12:11:08 +0200
Subject: [PATCH 13/15] run silence_storage_warning only GHA

---
 test/e2e/e2e_helper.go | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/test/e2e/e2e_helper.go b/test/e2e/e2e_helper.go
index f7a0af2..b16bbc4 100644
--- a/test/e2e/e2e_helper.go
+++ b/test/e2e/e2e_helper.go
@@ -4,6 +4,7 @@ import (
 	"context"
 	"os"
 	"os/exec"
+	"strconv"
 	"testing"
 
 	"github.com/spf13/afero"
 )
 
 func RunTF(args ...string) (string, error) {
 	ctx := context.Background()
 
 	// Ensure Atlas CLI storage warning is silenced before running tests as it is not enabled in GitHub Actions
-	_ = exec.CommandContext(ctx, "atlas", "config", "set", "silence_storage_warning", "true").Run()
+	inCI, _ := strconv.ParseBool(os.Getenv("CI"))
+	if inCI {
+		_ = exec.CommandContext(ctx, "atlas", "config", "set", "silence_storage_warning", "true").Run()
+	}
 
 	args = append([]string{"tf"}, args...)
 	cmd := exec.CommandContext(ctx, "atlas", args...)
From 7904329c4719c7432108fd73fd942ed8140d609b Mon Sep 17 00:00:00 2001
From: Leo Antoli <430982+lantoli@users.noreply.github.com>
Date: Thu, 11 Sep 2025 12:35:32 +0200
Subject: [PATCH 14/15] refactor specs

---
 internal/convert/adv2v2.go  | 18 +++++++-----------
 internal/convert/clu2adv.go | 27 +++++++++------------------
 internal/convert/shared.go  |  7 ++-----
 3 files changed, 18 insertions(+), 34 deletions(-)

diff --git a/internal/convert/adv2v2.go b/internal/convert/adv2v2.go
index 8145c25..d070203 100644
--- a/internal/convert/adv2v2.go
+++ b/internal/convert/adv2v2.go
@@ -8,6 +8,11 @@ import (
 	"github.com/mongodb-labs/atlas-cli-plugin-terraform/internal/hcl"
 )
 
+var (
+	specsWithDisk    = []string{nElectableSpecs, nReadOnlySpecs, nAnalyticsSpecs}
+	specsWithoutDisk = []string{nAutoScaling, nAnalyticsAutoScaling}
+)
+
 // AdvancedClusterToV2 transforms all mongodbatlas_advanced_cluster resource definitions in a
 // Terraform configuration file from SDKv2 schema to TPF (Terraform Plugin Framework) schema.
 // All other resources and data sources are left untouched.
@@ -198,12 +203,9 @@ func copyAttributesSorted(targetBody *hclwrite.Body, sourceAttrs map[string]*hcl
 }
 
 func processAllSpecs(body *hclwrite.Body, diskSizeGB hclwrite.Tokens) {
-	specsWithDisk := []string{nElectableSpecs, nReadOnlySpecs, nAnalyticsSpecs}
 	for _, spec := range specsWithDisk {
 		fillSpecOpt(body, spec, diskSizeGB)
 	}
-	specsWithoutDisk := []string{nAutoScaling, nAnalyticsAutoScaling}
 	for _, spec := range specsWithoutDisk {
 		fillSpecOpt(body, spec, nil)
 	}
@@ -210,19 +213,12 @@ func processAllSpecs(body *hclwrite.Body, diskSizeGB hclwrite.Tokens) {
 func processConfigForDynamicBlock(configBlockb *hclwrite.Body, diskSizeGB hclwrite.Tokens) *hclwrite.Body {
 	newConfigBody := hclwrite.NewEmptyFile().Body()
-	attrs := configBlockb.Attributes()
-	orderedAttrs := []string{nPriority, nProviderName, nRegionName}
-	for _, attrName := range orderedAttrs {
-		if attr := attrs[attrName]; attr != nil {
-			newConfigBody.SetAttributeRaw(attrName, attr.Expr().BuildTokens(nil))
-		}
-	}
+	copyAttributesSorted(newConfigBody, configBlockb.Attributes())
 	for _, block := range configBlockb.Blocks() {
 		blockType := block.Type()
 		blockBody := hclwrite.NewEmptyFile().Body()
 		copyAttributesSorted(blockBody, block.Body().Attributes())
-		if diskSizeGB != nil &&
-			(blockType == nElectableSpecs || blockType == nReadOnlySpecs || blockType == nAnalyticsSpecs) {
+		if diskSizeGB != nil && slices.Contains(specsWithDisk, blockType) {
 			blockBody.SetAttributeRaw(nDiskSizeGB, diskSizeGB)
 		}
 		newConfigBody.SetAttributeRaw(blockType, hcl.TokensObject(blockBody))
diff --git a/internal/convert/clu2adv.go b/internal/convert/clu2adv.go
index 7b113f1..6519344 100644
--- a/internal/convert/clu2adv.go
+++ b/internal/convert/clu2adv.go
@@ -314,36 +314,27 @@ func getRegionConfig(configSrc *hclwrite.Block, root attrVals, isDynamicBlock bo
 	if err := hcl.MoveAttr(configSrc.Body(), fileb, nPriority, nPriority, errRepSpecs); err != nil {
 		return nil, err
 	}
-	// Process all spec types
-	specMappings := []struct {
-		nodeName string
-		specName string
-	}{
-		{nElectableNodes, nElectableSpecs},
-		{nReadOnlyNodes, nReadOnlySpecs},
-		{nAnalyticsNodes, nAnalyticsSpecs},
-	}
-	for _, mapping := range specMappings {
-		if spec, _ := getSpec(configSrc, mapping.nodeName, root, isDynamicBlock); spec != nil {
-			fileb.SetAttributeRaw(mapping.specName, spec)
-		}
-	}
+	processSpec(fileb, configSrc, nElectableSpecs, nElectableNodes, root, isDynamicBlock)
+	processSpec(fileb, configSrc, nReadOnlySpecs, nReadOnlyNodes, root, isDynamicBlock)
+	processSpec(fileb, configSrc, nAnalyticsSpecs, nAnalyticsNodes, root, isDynamicBlock)
+
 	if autoScaling := getAutoScalingOpt(root.opt); autoScaling != nil {
 		fileb.SetAttributeRaw(nAutoScaling, autoScaling)
 	}
 	return fileb, nil
 }
 
-func getSpec(configSrc *hclwrite.Block, countName string, root attrVals, isDynamicBlock bool) (hclwrite.Tokens, error) {
+func processSpec(configb *hclwrite.Body, configSrc *hclwrite.Block,
+	specName, countName string, root attrVals, isDynamicBlock bool) {
 	var (
 		fileb = hclwrite.NewEmptyFile().Body()
 		count = configSrc.Body().GetAttribute(countName)
 	)
 	if count == nil {
-		return nil, fmt.Errorf("%s: attribute %s not found", errRepSpecs, countName)
+		return
 	}
 	if countVal, errVal := hcl.GetAttrInt(count, errRepSpecs); countVal == 0 && errVal == nil {
-		return nil, fmt.Errorf("%s: attribute %s is 0", errRepSpecs, countName)
+		return
 	}
 	fileb.SetAttributeRaw(nNodeCount, count.Expr().BuildTokens(nil))
 	fileb.SetAttributeRaw(nInstanceSize, root.req[nInstanceSizeSrc])
@@ -360,7 +351,7 @@ func getSpec(configSrc *hclwrite.Block, countName string, root attrVals, isDynam
 	if isDynamicBlock {
 		tokens = append(hcl.TokensFromExpr(fmt.Sprintf("%s == 0 ? null :", hcl.GetAttrExpr(count))), tokens...)
 	}
-	return tokens, nil
+	configb.SetAttributeRaw(specName, tokens)
 }
 
 func getAutoScalingOpt(opt map[string]hclwrite.Tokens) hclwrite.Tokens {
diff --git a/internal/convert/shared.go b/internal/convert/shared.go
index 4061800..800f9a8 100644
--- a/internal/convert/shared.go
+++ b/internal/convert/shared.go
@@ -177,16 +177,13 @@ func fillAdvConfigOpt(resourceb *hclwrite.Body) {
 	fillBlockOpt(resourceb, nAdvConfig)
 }
 
-// processCommonOptionalBlocks processes tags, labels, and other optional blocks
-// This function is used by both adv2v2 and clu2adv conversions
+// processCommonOptionalBlocks processes tags, labels, and other optional blocks.
 func processCommonOptionalBlocks(resourceb *hclwrite.Body) error {
-	// Process tags and labels
 	for _, name := range []string{nTags, nLabels} {
 		if err := fillTagsLabelsOpt(resourceb, name); err != nil {
 			return err
 		}
 	}
-	// Process optional configuration blocks
 	fillAdvConfigOpt(resourceb)
 	for _, name := range []string{nBiConnector, nPinnedFCV, nTimeouts} {
 		fillBlockOpt(resourceb, name)
 	}
 	return nil
 }
 
-// buildForExpr builds a for expression with the given variable and collection
+// buildForExpr builds a for expression with the given variable and collection.
 func buildForExpr(varName, collection string, trailingSpace bool) string {
 	expr := fmt.Sprintf("for %s in %s :", varName, collection)
 	if trailingSpace {

From b2da8e1135e57a608ad55f0a5f971de7b3a76dd7 Mon Sep 17 00:00:00 2001
From: Leo Antoli <430982+lantoli@users.noreply.github.com>
Date: Thu, 11 Sep 2025 12:42:41 +0200
Subject: [PATCH 15/15] revert to more explicit code

---
 internal/convert/clu2adv.go | 86 +++++++++++++++++--------------------
 1 file changed, 39 insertions(+), 47 deletions(-)

diff --git a/internal/convert/clu2adv.go b/internal/convert/clu2adv.go
index 6519344..914ef64 100644
--- a/internal/convert/clu2adv.go
+++ b/internal/convert/clu2adv.go
@@ -113,18 +113,14 @@ func processFreeTierCluster(resourceb *hclwrite.Body) error {
 	configb := hclwrite.NewEmptyFile().Body()
 	hcl.SetAttrInt(configb, nPriority, valMaxPriority)
-	// Move required attributes for free tier cluster
-	moveAttrs := []struct {
-		src, dst string
-	}{
-		{nRegionNameSrc, nRegionName},
-		{nProviderName, nProviderName},
-		{nBackingProviderName, nBackingProviderName},
-	}
-	for _, attr := range moveAttrs {
-		if err := hcl.MoveAttr(resourceb, configb, attr.src, attr.dst, errFreeCluster); err != nil {
-			return err
-		}
+	if err := hcl.MoveAttr(resourceb, configb, nRegionNameSrc, nRegionName, errFreeCluster); err != nil {
+		return err
+	}
+	if err := hcl.MoveAttr(resourceb, configb, nProviderName, nProviderName, errFreeCluster); err != nil {
+		return err
+	}
+	if err := hcl.MoveAttr(resourceb, configb, nBackingProviderName, nBackingProviderName, errFreeCluster); err != nil {
+		return err
 	}
 	electableSpecb := hclwrite.NewEmptyFile().Body()
 	if err := hcl.MoveAttr(resourceb, electableSpecb, nInstanceSizeSrc, nInstanceSize, errFreeCluster); err != nil {
@@ -432,43 +428,39 @@ func sortConfigsByPriority(configs []*hclwrite.Body) []*hclwrite.Body {
 
 // popRootAttrs deletes the attributes common to all replication_specs/regions_config and returns them.
 func popRootAttrs(body *hclwrite.Body) (attrVals, error) {
-	type attrConfig struct {
-		name     string
-		required bool
-	}
-
-	attrs := []attrConfig{
-		// Required attributes
-		{nProviderName, true},
-		{nInstanceSizeSrc, true},
-		// Optional attributes
-		{nElectableNodes, false},
-		{nReadOnlyNodes, false},
-		{nAnalyticsNodes, false},
-		{nDiskSizeGB, false},
-		{nDiskGBEnabledSrc, false},
-		{nComputeEnabledSrc, false},
-		{nComputeMinInstanceSizeSrc, false},
-		{nComputeMaxInstanceSizeSrc, false},
-		{nComputeScaleDownEnabledSrc, false},
-		{nEBSVolumeTypeSrc, false},
-		{nDiskIOPSSrc, false},
+	var (
+		reqNames = []string{
+			nProviderName,
+			nInstanceSizeSrc,
+		}
+		optNames = []string{
+			nElectableNodes,
+			nReadOnlyNodes,
+			nAnalyticsNodes,
+			nDiskSizeGB,
+			nDiskGBEnabledSrc,
+			nComputeEnabledSrc,
+			nComputeMinInstanceSizeSrc,
+			nComputeMaxInstanceSizeSrc,
+			nComputeScaleDownEnabledSrc,
+			nEBSVolumeTypeSrc,
+			nDiskIOPSSrc,
+		}
+		req = make(map[string]hclwrite.Tokens)
+		opt = make(map[string]hclwrite.Tokens)
+	)
+	for _, name := range reqNames {
+		tokens, err := hcl.PopAttr(body, name, errRepSpecs)
+		if err != nil {
+			return attrVals{}, err
+		}
+		req[name] = tokens
 	}
-
-	req := make(map[string]hclwrite.Tokens)
-	opt := make(map[string]hclwrite.Tokens)
-
-	for _, attr := range attrs {
-		tokens, err := hcl.PopAttr(body, attr.name, errRepSpecs)
-		if attr.required {
-			if err != nil {
-				return attrVals{}, err
-			}
-			req[attr.name] = tokens
-		} else if tokens != nil {
-			opt[attr.name] = tokens
+	for _, name := range optNames {
+		tokens, _ := hcl.PopAttr(body, name, errRepSpecs)
+		if tokens != nil {
+			opt[name] = tokens
 		}
 	}
-
 	return attrVals{req: req, opt: opt}, nil
 }