diff --git a/docs/command_adv2v2.md b/docs/command_adv2v2.md index d24f919..02afb04 100644 --- a/docs/command_adv2v2.md +++ b/docs/command_adv2v2.md @@ -26,6 +26,15 @@ atlas tf adv2v2 -f in.tf -o out.tf - `--replaceOutput` or `-r`: Overwrite the file at the output path if it already exists. You can also modify the input file in-place. - `--watch` or `-w`: Keep the plugin running and watching for changes in the input file +## Comments and formatting + +During the conversion process, some formatting elements may not be preserved: +- Some comments from the original resources may not be preserved in the output +- Custom blank lines and spacing may be modified +- The output file will have standardized formatting + +We recommend reviewing the converted output and re-adding any important comments or documentation that you need to maintain. + ## Examples You can find [here](https://github.com/mongodb-labs/atlas-cli-plugin-terraform/tree/main/internal/convert/testdata/adv2v2) examples of input files (suffix .in.tf) and the corresponding output files (suffix .out.tf). @@ -56,6 +65,7 @@ dynamic "tags" { ### Dynamic blocks in region_configs You can use `dynamic` blocks for `region_configs`. The plugin assumes that the value of `for_each` is an expression which evaluates to a `list` of objects. +**Note:** `map` and `set` are not supported. This is an example of how to use dynamic blocks in `region_configs`: ```hcl @@ -81,6 +91,7 @@ replication_specs { ### Dynamic blocks in replication_specs You can use `dynamic` blocks for `replication_specs`. The plugin assumes that the value of `for_each` is an expression which evaluates to a `list` of objects. +**Note:** `map` and `set` are not supported. 
This is an example of how to use dynamic blocks in `replication_specs`: ```hcl diff --git a/docs/command_clu2adv.md b/docs/command_clu2adv.md index 88986b5..ba10a2e 100644 --- a/docs/command_clu2adv.md +++ b/docs/command_clu2adv.md @@ -26,6 +26,15 @@ atlas tf clu2adv -f in.tf -o out.tf - `--watch` or `-w`: Keep the plugin running and watching for changes in the input file - `--includeMoved` or `-m`: Include the `moved blocks` in the output file +## Comments and formatting + +During the conversion process, some formatting elements may not be preserved: +- Some comments from the original resources may not be preserved in the output +- Custom blank lines and spacing may be modified +- The output file will have standardized formatting + +We recommend reviewing the converted output and re-adding any important comments or documentation that you need to maintain. + ## Examples You can find [here](https://github.com/mongodb-labs/atlas-cli-plugin-terraform/tree/main/internal/convert/testdata/clu2adv) some examples of input files (suffix .in.tf) and the corresponding output files (suffix .out.tf). @@ -56,6 +65,7 @@ dynamic "tags" { ### Dynamic blocks in regions_config You can use `dynamic` blocks for `regions_config`. The plugin assumes that the value of `for_each` is an expression which evaluates to a `list` of objects. +**Note:** `map` and `set` are not supported. This is an example of how to use dynamic blocks in `regions_config`: ```hcl @@ -77,6 +87,7 @@ replication_specs { ### Dynamic blocks in replication_specs You can use `dynamic` blocks for `replication_specs`. The plugin assumes that the value of `for_each` is an expression which evaluates to a `list` of objects. +**Note:** `map` and `set` are not supported. 
This is an example of how to use dynamic blocks in `replication_specs`: ```hcl diff --git a/internal/convert/adv2v2.go b/internal/convert/adv2v2.go index 734da45..d070203 100644 --- a/internal/convert/adv2v2.go +++ b/internal/convert/adv2v2.go @@ -8,6 +8,11 @@ import ( "github.com/mongodb-labs/atlas-cli-plugin-terraform/internal/hcl" ) +var ( + specsWithDisk = []string{nElectableSpecs, nReadOnlySpecs, nAnalyticsSpecs} + specsWithoutDisk = []string{nAutoScaling, nAnalyticsAutoScaling} +) + // AdvancedClusterToV2 transforms all mongodbatlas_advanced_cluster resource definitions in a // Terraform configuration file from SDKv2 schema to TPF (Terraform Plugin Framework) schema. // All other resources and data sources are left untouched. @@ -18,20 +23,18 @@ func AdvancedClusterToV2(config []byte) ([]byte, error) { } parserb := parser.Body() for _, block := range parserb.Blocks() { - updated, err := updateResource(block) + updated, err := processResource(block) if err != nil { return nil, err } - if updated { // If the resource was converted, add a comment at the end so user knows the resource was updated - blockb := block.Body() - blockb.AppendNewline() - hcl.AppendComment(blockb, commentUpdatedBy) + if updated { + addComments(block, true) } } return parser.Bytes(), nil } -func updateResource(resource *hclwrite.Block) (bool, error) { +func processResource(resource *hclwrite.Block) (bool, error) { if resource.Type() != resourceType || getResourceName(resource) != advCluster { return false, nil } @@ -43,24 +46,17 @@ func updateResource(resource *hclwrite.Block) (bool, error) { return false, nil } diskSizeGB, _ := hcl.PopAttr(resourceb, nDiskSizeGB, errRoot) // ok to fail as it's optional - if err := convertRepSpecs(resourceb, diskSizeGB); err != nil { - return false, err - } - if err := fillTagsLabelsOpt(resourceb, nTags); err != nil { + if err := processRepSpecs(resourceb, diskSizeGB); err != nil { return false, err } - if err := fillTagsLabelsOpt(resourceb, nLabels); err 
!= nil { + if err := processCommonOptionalBlocks(resourceb); err != nil { return false, err } - fillAdvConfigOpt(resourceb) - fillBlockOpt(resourceb, nBiConnector) - fillBlockOpt(resourceb, nPinnedFCV) - fillBlockOpt(resourceb, nTimeouts) return true, nil } -func convertRepSpecs(resourceb *hclwrite.Body, diskSizeGB hclwrite.Tokens) error { - d, err := convertRepSpecsWithDynamicBlock(resourceb, diskSizeGB) +func processRepSpecs(resourceb *hclwrite.Body, diskSizeGB hclwrite.Tokens) error { + d, err := processRepSpecsWithDynamicBlock(resourceb, diskSizeGB) if err != nil { return err } @@ -80,7 +76,7 @@ func convertRepSpecs(resourceb *hclwrite.Body, diskSizeGB hclwrite.Tokens) error blockb := block.Body() shardsAttr := blockb.GetAttribute(nNumShards) blockb.RemoveAttribute(nNumShards) - dConfig, err := convertConfigsWithDynamicBlock(blockb, diskSizeGB, false) + dConfig, err := processConfigsWithDynamicBlock(blockb, diskSizeGB, false) if err != nil { return err } @@ -119,22 +115,42 @@ func convertRepSpecs(resourceb *hclwrite.Body, diskSizeGB hclwrite.Tokens) error return nil } -func convertRepSpecsWithDynamicBlock(resourceb *hclwrite.Body, diskSizeGB hclwrite.Tokens) (dynamicBlock, error) { +func processRepSpecsWithDynamicBlock(resourceb *hclwrite.Body, diskSizeGB hclwrite.Tokens) (dynamicBlock, error) { dSpec, err := getDynamicBlock(resourceb, nRepSpecs, true) if err != nil || !dSpec.IsPresent() { return dynamicBlock{}, err } transformReferences(dSpec.content.Body(), nRepSpecs, nSpec) - dConfig, err := convertConfigsWithDynamicBlock(dSpec.content.Body(), diskSizeGB, true) + dConfig, err := processConfigsWithDynamicBlock(dSpec.content.Body(), diskSizeGB, true) if err != nil { return dynamicBlock{}, err } + if dConfig.tokens != nil { + forSpec := hcl.TokensFromExpr(buildForExpr(nSpec, hcl.GetAttrExpr(dSpec.forEach), true)) + dSpec.tokens = hcl.TokensFuncFlatten(append(forSpec, dConfig.tokens...)) + return dSpec, nil + } + + // Handle static region_configs blocks inside 
dynamic replication_specs + specBody := dSpec.content.Body() + staticConfigs := collectBlocks(specBody, nConfig) + repSpecb := hclwrite.NewEmptyFile().Body() + handleZoneName(repSpecb, specBody, nRepSpecs, nSpec) + var configs []*hclwrite.Body + for _, configBlock := range staticConfigs { + configBlockb := configBlock.Body() + newConfigBody := processConfigForDynamicBlock(configBlockb, diskSizeGB) + configs = append(configs, newConfigBody) + } + repSpecb.SetAttributeRaw(nConfig, hcl.TokensArray(configs)) + numShardsAttr := specBody.GetAttribute(nNumShards) forSpec := hcl.TokensFromExpr(buildForExpr(nSpec, hcl.GetAttrExpr(dSpec.forEach), true)) - dSpec.tokens = hcl.TokensFuncFlatten(append(forSpec, dConfig.tokens...)) + numShardsTokens := buildNumShardsTokens(numShardsAttr, repSpecb, nRepSpecs, nSpec) + dSpec.tokens = hcl.TokensFuncFlatten(append(forSpec, numShardsTokens...)) return dSpec, nil } -func convertConfigsWithDynamicBlock(specbSrc *hclwrite.Body, diskSizeGB hclwrite.Tokens, +func processConfigsWithDynamicBlock(specbSrc *hclwrite.Body, diskSizeGB hclwrite.Tokens, insideDynamicRepSpec bool) (dynamicBlock, error) { d, err := getDynamicBlock(specbSrc, nConfig, true) if err != nil || !d.IsPresent() { @@ -142,18 +158,7 @@ func convertConfigsWithDynamicBlock(specbSrc *hclwrite.Body, diskSizeGB hclwrite } configBody := d.content.Body() transformReferences(configBody, getResourceName(d.block), nRegion) - regionConfigBody := hclwrite.NewEmptyFile().Body() - copyAttributesSorted(regionConfigBody, configBody.Attributes()) - for _, block := range configBody.Blocks() { - blockType := block.Type() - blockBody := hclwrite.NewEmptyFile().Body() - copyAttributesSorted(blockBody, block.Body().Attributes()) - if diskSizeGB != nil && - (blockType == nElectableSpecs || blockType == nReadOnlySpecs || blockType == nAnalyticsSpecs) { - blockBody.SetAttributeRaw(nDiskSizeGB, diskSizeGB) - } - regionConfigBody.SetAttributeRaw(blockType, hcl.TokensObject(blockBody)) - } + 
regionConfigBody := processConfigForDynamicBlock(configBody, diskSizeGB) forEach := hcl.GetAttrExpr(d.forEach) if insideDynamicRepSpec { forEach = fmt.Sprintf("%s.%s", nSpec, nConfig) @@ -165,18 +170,11 @@ func convertConfigsWithDynamicBlock(specbSrc *hclwrite.Body, diskSizeGB hclwrite return d, nil } repSpecb := hclwrite.NewEmptyFile().Body() - if zoneNameAttr := specbSrc.GetAttribute(nZoneName); zoneNameAttr != nil { - zoneNameExpr := transformReference(hcl.GetAttrExpr(zoneNameAttr), nRepSpecs, nSpec) - repSpecb.SetAttributeRaw(nZoneName, hcl.TokensFromExpr(zoneNameExpr)) - } + handleZoneName(repSpecb, specbSrc, nRepSpecs, nSpec) repSpecb.SetAttributeRaw(nConfig, hcl.EncloseBracketsNewLines(regionTokens)) - if numShardsAttr := specbSrc.GetAttribute(nNumShards); numShardsAttr != nil { - numShardsExpr := transformReference(hcl.GetAttrExpr(numShardsAttr), nRepSpecs, nSpec) - tokens := hcl.TokensFromExpr(buildForExpr("i", fmt.Sprintf("range(%s)", numShardsExpr), false)) - tokens = append(tokens, hcl.TokensObject(repSpecb)...) 
- return dynamicBlock{tokens: hcl.EncloseBracketsNewLines(tokens)}, nil - } - return dynamicBlock{tokens: hcl.TokensArraySingle(repSpecb)}, nil + numShardsAttr := specbSrc.GetAttribute(nNumShards) + tokens := buildNumShardsTokens(numShardsAttr, repSpecb, nRepSpecs, nSpec) + return dynamicBlock{tokens: tokens}, nil } // hasExpectedBlocksAsAttributes checks if any of the expected block names @@ -205,11 +203,27 @@ func copyAttributesSorted(targetBody *hclwrite.Body, sourceAttrs map[string]*hcl } func processAllSpecs(body *hclwrite.Body, diskSizeGB hclwrite.Tokens) { - fillSpecOpt(body, nElectableSpecs, diskSizeGB) - fillSpecOpt(body, nReadOnlySpecs, diskSizeGB) - fillSpecOpt(body, nAnalyticsSpecs, diskSizeGB) - fillSpecOpt(body, nAutoScaling, nil) - fillSpecOpt(body, nAnalyticsAutoScaling, nil) + for _, spec := range specsWithDisk { + fillSpecOpt(body, spec, diskSizeGB) + } + for _, spec := range specsWithoutDisk { + fillSpecOpt(body, spec, nil) + } +} + +func processConfigForDynamicBlock(configBlockb *hclwrite.Body, diskSizeGB hclwrite.Tokens) *hclwrite.Body { + newConfigBody := hclwrite.NewEmptyFile().Body() + copyAttributesSorted(newConfigBody, configBlockb.Attributes()) + for _, block := range configBlockb.Blocks() { + blockType := block.Type() + blockBody := hclwrite.NewEmptyFile().Body() + copyAttributesSorted(blockBody, block.Body().Attributes()) + if diskSizeGB != nil && slices.Contains(specsWithDisk, blockType) { + blockBody.SetAttributeRaw(nDiskSizeGB, diskSizeGB) + } + newConfigBody.SetAttributeRaw(blockType, hcl.TokensObject(blockBody)) + } + return newConfigBody } func fillSpecOpt(resourceb *hclwrite.Body, name string, diskSizeGBTokens hclwrite.Tokens) { diff --git a/internal/convert/clu2adv.go b/internal/convert/clu2adv.go index 620c990..914ef64 100644 --- a/internal/convert/clu2adv.go +++ b/internal/convert/clu2adv.go @@ -3,10 +3,7 @@ package convert import ( "fmt" "sort" - "strconv" - "strings" - "github.com/hashicorp/hcl/v2/hclsyntax" 
"github.com/hashicorp/hcl/v2/hclwrite" "github.com/mongodb-labs/atlas-cli-plugin-terraform/internal/hcl" "github.com/zclconf/go-cty/cty" @@ -43,10 +40,7 @@ func ClusterToAdvancedCluster(config []byte, includeMoved bool) ([]byte, error) } convertedDataSource := convertDataSource(block) if convertedResource || convertedDataSource { - blockb := block.Body() - blockb.AppendNewline() - hcl.AppendComment(blockb, commentGeneratedBy) - hcl.AppendComment(blockb, commentConfirmReferences) + addComments(block, false) } } fillMovedBlocks(parserb, moveLabels) @@ -64,9 +58,9 @@ func convertResource(block *hclwrite.Block) (bool, error) { } var err error if isFreeTierCluster(blockb) { - err = fillFreeTierCluster(blockb) + err = processFreeTierCluster(blockb) } else { - err = fillCluster(blockb) + err = processCluster(blockb) } if err != nil { return false, err @@ -114,10 +108,11 @@ func fillMovedBlocks(body *hclwrite.Body, moveLabels []string) { } -// fillFreeTierCluster is the entry point to convert clusters in free tier -func fillFreeTierCluster(resourceb *hclwrite.Body) error { +// processFreeTierCluster is the entry point to convert clusters in free tier +func processFreeTierCluster(resourceb *hclwrite.Body) error { resourceb.SetAttributeValue(nClusterType, cty.StringVal(valClusterType)) configb := hclwrite.NewEmptyFile().Body() hcl.SetAttrInt(configb, nPriority, valMaxPriority) + if err := hcl.MoveAttr(resourceb, configb, nRegionNameSrc, nRegionName, errFreeCluster); err != nil { return err } @@ -139,7 +134,7 @@ func fillFreeTierCluster(resourceb *hclwrite.Body) error { } -// fillCluster is the entry point to convert clusters with replications_specs (all but free tier) -func fillCluster(resourceb *hclwrite.Body) error { +// processCluster is the entry point to convert clusters with replications_specs (all but free tier) +func processCluster(resourceb *hclwrite.Body) error { root, errRoot := popRootAttrs(resourceb) if errRoot != nil { return errRoot @@ -147,24 +142,14 @@ resourceb.RemoveAttribute(nNumShards) // num_shards in root is not relevant, only in replication_specs // ok to fail as cloud_backup is
optional _ = hcl.MoveAttr(resourceb, resourceb, nCloudBackup, nBackupEnabled, errRepSpecs) - if err := fillRepSpecs(resourceb, root); err != nil { - return err - } - if err := fillTagsLabelsOpt(resourceb, nTags); err != nil { + if err := processRepSpecsCluster(resourceb, root); err != nil { return err } - if err := fillTagsLabelsOpt(resourceb, nLabels); err != nil { - return err - } - fillAdvConfigOpt(resourceb) - fillBlockOpt(resourceb, nBiConnector) - fillBlockOpt(resourceb, nPinnedFCV) - fillBlockOpt(resourceb, nTimeouts) - return nil + return processCommonOptionalBlocks(resourceb) } -func fillRepSpecs(resourceb *hclwrite.Body, root attrVals) error { - d, err := fillRepSpecsWithDynamicBlock(resourceb, root) +func processRepSpecsCluster(resourceb *hclwrite.Body, root attrVals) error { + d, err := processRepSpecsClusterWithDynamicBlock(resourceb, root) if err != nil { return err } @@ -177,7 +162,7 @@ func fillRepSpecs(resourceb *hclwrite.Body, root attrVals) error { if len(repSpecBlocks) == 0 { return fmt.Errorf("must have at least one replication_specs") } - dConfig, err := fillConfigsWithDynamicRegion(repSpecBlocks[0].Body(), root, false) + dConfig, err := processConfigsWithDynamicRegion(repSpecBlocks[0].Body(), root, false) if err != nil { return err } @@ -196,7 +181,7 @@ func fillRepSpecs(resourceb *hclwrite.Body, root attrVals) error { if shardsAttr == nil { return fmt.Errorf("%s: %s not found", errRepSpecs, nNumShards) } - if errConfig := fillRegionConfigs(specb, specbSrc, root); errConfig != nil { + if errConfig := processRegionConfigs(specb, specbSrc, root); errConfig != nil { return errConfig } if hasVariableShards { @@ -220,25 +205,49 @@ func fillRepSpecs(resourceb *hclwrite.Body, root attrVals) error { } -// fillRepSpecsWithDynamicBlock used for dynamic blocks in replication_specs -func fillRepSpecsWithDynamicBlock(resourceb *hclwrite.Body, root attrVals) (dynamicBlock, error) { +// processRepSpecsClusterWithDynamicBlock is used for dynamic blocks in replication_specs +func processRepSpecsClusterWithDynamicBlock(resourceb *hclwrite.Body, root
attrVals) (dynamicBlock, error) { dSpec, err := getDynamicBlock(resourceb, nRepSpecs, true) if err != nil || !dSpec.IsPresent() { return dynamicBlock{}, err } transformReferences(dSpec.content.Body(), nRepSpecs, nSpec) - dConfig, err := fillConfigsWithDynamicRegion(dSpec.content.Body(), root, true) + dConfig, err := processConfigsWithDynamicRegion(dSpec.content.Body(), root, true) if err != nil { return dynamicBlock{}, err } + if dConfig.tokens != nil { + forSpec := hcl.TokensFromExpr(buildForExpr(nSpec, hcl.GetAttrExpr(dSpec.forEach), true)) + forSpec = append(forSpec, dConfig.tokens...) + tokens := hcl.TokensFuncFlatten(forSpec) + dSpec.tokens = tokens + return dSpec, nil + } + + // Handle static region_configs blocks inside dynamic replication_specs + specBody := dSpec.content.Body() + staticConfigs := collectBlocks(specBody, nConfigSrc) + repSpecb := hclwrite.NewEmptyFile().Body() + handleZoneName(repSpecb, specBody, nRepSpecs, nSpec) + var configs []*hclwrite.Body + for _, configBlock := range staticConfigs { + config, err := getRegionConfig(configBlock, root, false) + if err != nil { + return dynamicBlock{}, err + } + configs = append(configs, config) + } + configs = sortConfigsByPriority(configs) + repSpecb.SetAttributeRaw(nConfig, hcl.TokensArray(configs)) + numShardsAttr := specBody.GetAttribute(nNumShards) forSpec := hcl.TokensFromExpr(buildForExpr(nSpec, hcl.GetAttrExpr(dSpec.forEach), true)) - forSpec = append(forSpec, dConfig.tokens...) 
- tokens := hcl.TokensFuncFlatten(forSpec) - dSpec.tokens = tokens + numShardsTokens := buildNumShardsTokens(numShardsAttr, repSpecb, nRepSpecs, nSpec) + dSpec.tokens = hcl.TokensFuncFlatten(append(forSpec, numShardsTokens...)) return dSpec, nil } -// fillConfigsWithDynamicRegion is used for dynamic blocks in region_configs -func fillConfigsWithDynamicRegion(specbSrc *hclwrite.Body, root attrVals, changeReferences bool) (dynamicBlock, error) { +// processConfigsWithDynamicRegion is used for dynamic blocks in region_configs +func processConfigsWithDynamicRegion(specbSrc *hclwrite.Body, root attrVals, + changeReferences bool) (dynamicBlock, error) { d, err := getDynamicBlock(specbSrc, nConfigSrc, true) if err != nil || !d.IsPresent() { return dynamicBlock{}, err @@ -270,7 +279,7 @@ func fillConfigsWithDynamicRegion(specbSrc *hclwrite.Body, root attrVals, change return d, nil } -func fillRegionConfigs(specb, specbSrc *hclwrite.Body, root attrVals) error { +func processRegionConfigs(specb, specbSrc *hclwrite.Body, root attrVals) error { var configs []*hclwrite.Body for { configSrc := specbSrc.FirstMatchingBlock(nConfigSrc, nil) @@ -301,31 +310,27 @@ func getRegionConfig(configSrc *hclwrite.Block, root attrVals, isDynamicBlock bo if err := hcl.MoveAttr(configSrc.Body(), fileb, nPriority, nPriority, errRepSpecs); err != nil { return nil, err } - if electable, _ := getSpec(configSrc, nElectableNodes, root, isDynamicBlock); electable != nil { - fileb.SetAttributeRaw(nElectableSpecs, electable) - } - if readOnly, _ := getSpec(configSrc, nReadOnlyNodes, root, isDynamicBlock); readOnly != nil { - fileb.SetAttributeRaw(nReadOnlySpecs, readOnly) - } - if analytics, _ := getSpec(configSrc, nAnalyticsNodes, root, isDynamicBlock); analytics != nil { - fileb.SetAttributeRaw(nAnalyticsSpecs, analytics) - } + processSpec(fileb, configSrc, nElectableSpecs, nElectableNodes, root, isDynamicBlock) + processSpec(fileb, configSrc, nReadOnlySpecs, nReadOnlyNodes, root, isDynamicBlock) + processSpec(fileb, configSrc, nAnalyticsSpecs, nAnalyticsNodes, root, isDynamicBlock)
+ if autoScaling := getAutoScalingOpt(root.opt); autoScaling != nil { fileb.SetAttributeRaw(nAutoScaling, autoScaling) } return fileb, nil } -func getSpec(configSrc *hclwrite.Block, countName string, root attrVals, isDynamicBlock bool) (hclwrite.Tokens, error) { +func processSpec(configb *hclwrite.Body, configSrc *hclwrite.Block, + specName, countName string, root attrVals, isDynamicBlock bool) { var ( fileb = hclwrite.NewEmptyFile().Body() count = configSrc.Body().GetAttribute(countName) ) if count == nil { - return nil, fmt.Errorf("%s: attribute %s not found", errRepSpecs, countName) + return } if countVal, errVal := hcl.GetAttrInt(count, errRepSpecs); countVal == 0 && errVal == nil { - return nil, fmt.Errorf("%s: attribute %s is 0", errRepSpecs, countName) + return } fileb.SetAttributeRaw(nNodeCount, count.Expr().BuildTokens(nil)) fileb.SetAttributeRaw(nInstanceSize, root.req[nInstanceSizeSrc]) @@ -342,7 +347,7 @@ func getSpec(configSrc *hclwrite.Block, countName string, root attrVals, isDynam if isDynamicBlock { tokens = append(hcl.TokensFromExpr(fmt.Sprintf("%s == 0 ? null :", hcl.GetAttrExpr(count))), tokens...) } - return tokens, nil + configb.SetAttributeRaw(specName, tokens) } func getAutoScalingOpt(opt map[string]hclwrite.Tokens) hclwrite.Tokens { @@ -389,11 +394,6 @@ func getResourceLabel(resource *hclwrite.Block) string { return labels[1] } -func replaceDynamicBlockExpr(attr *hclwrite.Attribute, blockName, attrName string) string { - expr := hcl.GetAttrExpr(attr) - return strings.ReplaceAll(expr, fmt.Sprintf("%s.%s", blockName, attrName), attrName) -} - // getDynamicBlockRegionArray returns the region array for a dynamic block in replication_specs. // e.g. [ for region in var.replication_specs.regions_config : { ... 
} if priority == region.priority ] func getDynamicBlockRegionArray(forEach string, configSrc *hclwrite.Block, root attrVals) (hclwrite.Tokens, error) { @@ -426,20 +426,6 @@ func sortConfigsByPriority(configs []*hclwrite.Body) []*hclwrite.Body { return configs } -func setKeyValue(body *hclwrite.Body, key, value *hclwrite.Attribute) { - keyStr, err := hcl.GetAttrString(key) - if err == nil { - if !hclsyntax.ValidIdentifier(keyStr) { - // wrap in quotes so invalid identifiers (e.g. with blanks) can be used as attribute names - keyStr = strconv.Quote(keyStr) - } - } else { - keyStr = strings.TrimSpace(string(key.Expr().BuildTokens(nil).Bytes())) - keyStr = "(" + keyStr + ")" // wrap in parentheses so non-literal expressions can be used as attribute names - } - body.SetAttributeRaw(keyStr, value.Expr().BuildTokens(nil)) -} - // popRootAttrs deletes the attributes common to all replication_specs/regions_config and returns them. func popRootAttrs(body *hclwrite.Body) (attrVals, error) { var ( diff --git a/internal/convert/shared.go b/internal/convert/shared.go index ffdff9f..800f9a8 100644 --- a/internal/convert/shared.go +++ b/internal/convert/shared.go @@ -4,8 +4,10 @@ import ( "errors" "fmt" "slices" + "strconv" "strings" + "github.com/hashicorp/hcl/v2/hclsyntax" "github.com/hashicorp/hcl/v2/hclwrite" "github.com/mongodb-labs/atlas-cli-plugin-terraform/internal/hcl" ) @@ -14,6 +16,18 @@ var ( errDynamicBlockAlone = errors.New("dynamic block must be the only block, see docs for more information") ) +// addComments adds appropriate comments to a converted block +func addComments(block *hclwrite.Block, isUpdatedComment bool) { + blockb := block.Body() + blockb.AppendNewline() + if isUpdatedComment { + hcl.AppendComment(blockb, commentUpdatedBy) + } else { + hcl.AppendComment(blockb, commentGeneratedBy) + hcl.AppendComment(blockb, commentConfirmReferences) + } +} + // hasVariableNumShards checks if any block has a variable (non-literal) num_shards attribute func 
hasVariableNumShards(blocks []*hclwrite.Block) bool { for _, block := range blocks { @@ -163,7 +177,21 @@ func fillAdvConfigOpt(resourceb *hclwrite.Body) { fillBlockOpt(resourceb, nAdvConfig) } -// buildForExpr builds a for expression with the given variable and collection +// processCommonOptionalBlocks processes tags, labels, and other optional blocks. +func processCommonOptionalBlocks(resourceb *hclwrite.Body) error { + for _, name := range []string{nTags, nLabels} { + if err := fillTagsLabelsOpt(resourceb, name); err != nil { + return err + } + } + fillAdvConfigOpt(resourceb) + for _, name := range []string{nBiConnector, nPinnedFCV, nTimeouts} { + fillBlockOpt(resourceb, name) + } + return nil +} + +// buildForExpr builds a for expression with the given variable and collection. func buildForExpr(varName, collection string, trailingSpace bool) string { expr := fmt.Sprintf("for %s in %s :", varName, collection) if trailingSpace { @@ -172,6 +200,26 @@ func buildForExpr(varName, collection string, trailingSpace bool) string { return expr } +// handleZoneName adds zone_name attribute to the body if present in source +func handleZoneName(targetBody, sourceBody *hclwrite.Body, blockName, varName string) { + if zoneNameAttr := sourceBody.GetAttribute(nZoneName); zoneNameAttr != nil { + zoneNameExpr := transformReference(hcl.GetAttrExpr(zoneNameAttr), blockName, varName) + targetBody.SetAttributeRaw(nZoneName, hcl.TokensFromExpr(zoneNameExpr)) + } +} + +// buildNumShardsTokens builds tokens for handling num_shards with for loops +func buildNumShardsTokens(numShardsAttr *hclwrite.Attribute, repSpecb *hclwrite.Body, + blockName, varName string) hclwrite.Tokens { + if numShardsAttr == nil { + return hcl.TokensArraySingle(repSpecb) + } + numShardsExpr := transformReference(hcl.GetAttrExpr(numShardsAttr), blockName, varName) + tokens := hcl.TokensFromExpr(buildForExpr("i", fmt.Sprintf("range(%s)", numShardsExpr), false)) + tokens = append(tokens, 
hcl.TokensObject(repSpecb)...) + return hcl.EncloseBracketsNewLines(tokens) +} + func fillTagsLabelsOpt(resourceb *hclwrite.Body, name string) error { tokensDynamic, err := extractTagsLabelsDynamicBlock(resourceb, name) if err != nil { @@ -235,3 +283,22 @@ func extractTagsLabelsIndividual(resourceb *hclwrite.Body, name string) (hclwrit } return hcl.TokensObject(fileb), nil } + +func replaceDynamicBlockExpr(attr *hclwrite.Attribute, blockName, attrName string) string { + expr := hcl.GetAttrExpr(attr) + return strings.ReplaceAll(expr, fmt.Sprintf("%s.%s", blockName, attrName), attrName) +} + +func setKeyValue(body *hclwrite.Body, key, value *hclwrite.Attribute) { + keyStr, err := hcl.GetAttrString(key) + if err == nil { + if !hclsyntax.ValidIdentifier(keyStr) { + // Wrap in quotes so invalid identifiers (e.g. with blanks) can be used as attribute names + keyStr = strconv.Quote(keyStr) + } + } else { + keyStr = strings.TrimSpace(string(key.Expr().BuildTokens(nil).Bytes())) + keyStr = "(" + keyStr + ")" // Wrap in parentheses so non-literal expressions can be used as attribute names + } + body.SetAttributeRaw(keyStr, value.Expr().BuildTokens(nil)) +} diff --git a/internal/convert/testdata/adv2v2/dynamic_replication_specs_no_dynamic_region_configs.in.tf b/internal/convert/testdata/adv2v2/dynamic_replication_specs_no_dynamic_region_configs.in.tf new file mode 100644 index 0000000..947056a --- /dev/null +++ b/internal/convert/testdata/adv2v2/dynamic_replication_specs_no_dynamic_region_configs.in.tf @@ -0,0 +1,82 @@ +locals { + replication_specs_list = [ + { + zone_name = "zone1" + region_name = "US_EAST_1" + }, + { + zone_name = "zone2" + region_name = "US_WEST_2" + } + ] +} + +resource "mongodbatlas_advanced_cluster" "one_config" { + project_id = "123" + name = "cluster" + cluster_type = "SHARDED" + + dynamic "replication_specs" { + for_each = local.replication_specs_list + content { + num_shards = 2 + zone_name = replication_specs.value.zone_name + + region_configs { + 
provider_name = "AWS" + region_name = replication_specs.value.region_name + priority = 7 + + electable_specs { + instance_size = "M10" + node_count = 3 + } + auto_scaling { + disk_gb_enabled = true + } + } + } + } +} + +resource "mongodbatlas_advanced_cluster" "multiple_config" { + project_id = "123" + name = "cluster" + cluster_type = "SHARDED" + + dynamic "replication_specs" { + for_each = local.replication_specs_list + content { + num_shards = 2 + zone_name = replication_specs.value.zone_name + + region_configs { + provider_name = "AWS" + region_name = replication_specs.value.region_name + priority = 7 + + electable_specs { + instance_size = "M10" + node_count = 2 + } + auto_scaling { + disk_gb_enabled = true + } + } + + region_configs { + provider_name = "AWS" + region_name = replication_specs.value.region_name + priority = 6 + + electable_specs { + instance_size = "M10" + node_count = 1 + } + auto_scaling { + disk_gb_enabled = true + } + } + } + } +} diff --git a/internal/convert/testdata/adv2v2/dynamic_replication_specs_no_dynamic_region_configs.out.tf b/internal/convert/testdata/adv2v2/dynamic_replication_specs_no_dynamic_region_configs.out.tf new file mode 100644 index 0000000..d02f709 --- /dev/null +++ b/internal/convert/testdata/adv2v2/dynamic_replication_specs_no_dynamic_region_configs.out.tf @@ -0,0 +1,84 @@ +locals { + replication_specs_list = [ + { + zone_name = "zone1" + region_name = "US_EAST_1" + }, + { + zone_name = "zone2" + region_name = "US_WEST_2" + } + ] +} + +resource "mongodbatlas_advanced_cluster" "one_config" { + project_id = "123" + name = "cluster" + cluster_type = "SHARDED" + + replication_specs = flatten([ + for spec in local.replication_specs_list : [ + for i in range(2) : { + zone_name = spec.zone_name + region_configs = [ + { + priority = 7 + provider_name = "AWS" + region_name = spec.region_name + electable_specs = { + instance_size = "M10" + node_count = 3 + } + auto_scaling = { + disk_gb_enabled = true + } + } + ] + } + ] + ]) + 
+ # Updated by atlas-cli-plugin-terraform, please review the changes. +} + +resource "mongodbatlas_advanced_cluster" "multiple_config" { + project_id = "123" + name = "cluster" + cluster_type = "SHARDED" + + replication_specs = flatten([ + for spec in local.replication_specs_list : [ + for i in range(2) : { + zone_name = spec.zone_name + region_configs = [ + { + priority = 7 + provider_name = "AWS" + region_name = spec.region_name + electable_specs = { + instance_size = "M10" + node_count = 2 + } + auto_scaling = { + disk_gb_enabled = true + } + }, + { + priority = 6 + provider_name = "AWS" + region_name = spec.region_name + electable_specs = { + instance_size = "M10" + node_count = 1 + } + auto_scaling = { + disk_gb_enabled = true + } + } + ] + } + ] + ]) + + # Updated by atlas-cli-plugin-terraform, please review the changes. +} diff --git a/internal/convert/testdata/clu2adv/dynamic_replication_specs_no_dynamic_region_configs.in.tf b/internal/convert/testdata/clu2adv/dynamic_replication_specs_no_dynamic_region_configs.in.tf new file mode 100644 index 0000000..11e7c6b --- /dev/null +++ b/internal/convert/testdata/clu2adv/dynamic_replication_specs_no_dynamic_region_configs.in.tf @@ -0,0 +1,60 @@ +locals { + replication_specs_list = [ + { + zone_name = "zone1" + region_name = "US_EAST_1" + }, + { + zone_name = "zone2" + region_name = "US_WEST_2" + } + ] +} + +resource "mongodbatlas_cluster" "one_config" { + project_id = "123" + name = "cluster" + provider_name = "AWS" + provider_instance_size_name = "M10" + disk_size_gb = 10 + auto_scaling_disk_gb_enabled = true + dynamic "replication_specs" { + for_each = local.replication_specs_list + content { + num_shards = 2 + zone_name = replication_specs.value.zone_name + regions_config { + region_name = replication_specs.value.region_name + priority = 7 + electable_nodes = 3 + } + } + } +} + +resource "mongodbatlas_cluster" "multiple_config" { + project_id = "123" + name = "cluster" + provider_name = "AWS" + 
provider_instance_size_name = "M10" + disk_size_gb = 10 + auto_scaling_disk_gb_enabled = true + dynamic "replication_specs" { + for_each = local.replication_specs_list + content { + num_shards = 2 + zone_name = replication_specs.value.zone_name + regions_config { + region_name = replication_specs.value.region_name + priority = 7 + electable_nodes = 2 + } + regions_config { + region_name = replication_specs.value.region_name + priority = 6 + electable_nodes = 1 + read_only_nodes = 4 + } + } + } +} diff --git a/internal/convert/testdata/clu2adv/dynamic_replication_specs_no_dynamic_region_configs.out.tf b/internal/convert/testdata/clu2adv/dynamic_replication_specs_no_dynamic_region_configs.out.tf new file mode 100644 index 0000000..32202cf --- /dev/null +++ b/internal/convert/testdata/clu2adv/dynamic_replication_specs_no_dynamic_region_configs.out.tf @@ -0,0 +1,90 @@ +locals { + replication_specs_list = [ + { + zone_name = "zone1" + region_name = "US_EAST_1" + }, + { + zone_name = "zone2" + region_name = "US_WEST_2" + } + ] +} + +resource "mongodbatlas_advanced_cluster" "one_config" { + project_id = "123" + name = "cluster" + replication_specs = flatten([ + for spec in local.replication_specs_list : [ + for i in range(2) : { + zone_name = spec.zone_name + region_configs = [ + { + provider_name = "AWS" + region_name = spec.region_name + priority = 7 + electable_specs = { + node_count = 3 + instance_size = "M10" + disk_size_gb = 10 + } + auto_scaling = { + disk_gb_enabled = true + } + } + ] + } + ] + ]) + + # Generated by atlas-cli-plugin-terraform. + # Please review the changes and confirm that references to this resource are updated. 
+} + +resource "mongodbatlas_advanced_cluster" "multiple_config" { + project_id = "123" + name = "cluster" + replication_specs = flatten([ + for spec in local.replication_specs_list : [ + for i in range(2) : { + zone_name = spec.zone_name + region_configs = [ + { + provider_name = "AWS" + region_name = spec.region_name + priority = 7 + electable_specs = { + node_count = 2 + instance_size = "M10" + disk_size_gb = 10 + } + auto_scaling = { + disk_gb_enabled = true + } + }, + { + provider_name = "AWS" + region_name = spec.region_name + priority = 6 + electable_specs = { + node_count = 1 + instance_size = "M10" + disk_size_gb = 10 + } + read_only_specs = { + node_count = 4 + instance_size = "M10" + disk_size_gb = 10 + } + auto_scaling = { + disk_gb_enabled = true + } + } + ] + } + ] + ]) + + # Generated by atlas-cli-plugin-terraform. + # Please review the changes and confirm that references to this resource are updated. +} diff --git a/test/e2e/e2e_helper.go b/test/e2e/e2e_helper.go index 2749d0a..b16bbc4 100644 --- a/test/e2e/e2e_helper.go +++ b/test/e2e/e2e_helper.go @@ -4,6 +4,7 @@ import ( "context" "os" "os/exec" + "strconv" "testing" "github.com/spf13/afero" @@ -12,8 +13,16 @@ import ( ) func RunTF(args ...string) (string, error) { + ctx := context.Background() + + // Ensure Atlas CLI storage warning is silenced before running tests as it is not enabled in GitHub Actions + inCI, _ := strconv.ParseBool(os.Getenv("CI")) + if inCI { + _ = exec.CommandContext(ctx, "atlas", "config", "set", "silence_storage_warning", "true").Run() + } + args = append([]string{"tf"}, args...) - cmd := exec.CommandContext(context.Background(), "atlas", args...) + cmd := exec.CommandContext(ctx, "atlas", args...) resp, err := cmd.CombinedOutput() return string(resp), err }