diff --git a/Makefile b/Makefile index 68ab53f..3f502ab 100644 --- a/Makefile +++ b/Makefile @@ -27,6 +27,17 @@ clean: ## Clean binary folders test: ## Run unit tests go test ./internal/... -timeout=30s -parallel=4 -race +.PHONY: lint-fix +lint-fix: ## Fix Go linter issues + @echo "==> Fixing linter errors..." + $(shell go env GOPATH)/bin/fieldalignment -json -fix ./... + golangci-lint run --fix + +.PHONY: lint +lint: ## Check Go linter issues + @echo "==> Checking source code against linters..." + golangci-lint run + .PHONY: test-update test-update: ## Run unit tests and update the golden files go test ./internal/... -timeout=30s -parallel=4 -race -update diff --git a/README.md b/README.md index eb3839c..4e6fef0 100644 --- a/README.md +++ b/README.md @@ -120,7 +120,6 @@ dynamic "replication_specs" { ### Limitations -- [`num_shards`](https://registry.terraform.io/providers/mongodb/mongodbatlas/latest/docs/resources/cluster#num_shards-2) in `replication_specs` must be a numeric [literal expression](https://developer.hashicorp.com/nomad/docs/job-specification/hcl2/expressions#literal-expressions), e.g. `var.num_shards` is not supported. This is to allow creating a `replication_specs` element per shard in `mongodbatlas_advanced_cluster`. This limitation doesn't apply if you're using `dynamic` blocks in `regions_config` or `replication_specs`. - `dynamic` blocks are supported with some [limitations](./docs/guide_clu2adv_dynamic_block.md). ## Feedback diff --git a/internal/convert/adv2v2.go b/internal/convert/adv2v2.go index f1c659e..2e3e3c3 100644 --- a/internal/convert/adv2v2.go +++ b/internal/convert/adv2v2.go @@ -11,7 +11,6 @@ import ( // AdvancedClusterToV2 transforms all mongodbatlas_advanced_cluster resource definitions in a // Terraform configuration file from SDKv2 schema to TPF (Terraform Plugin Framework) schema. // All other resources and data sources are left untouched. -// TODO: Not implemented yet. 
func AdvancedClusterToV2(config []byte) ([]byte, error) { parser, err := hcl.GetParser(config) if err != nil { @@ -37,6 +36,9 @@ func updateResource(resource *hclwrite.Block) (bool, error) { return false, nil } resourceb := resource.Body() + if errDyn := checkDynamicBlock(resourceb); errDyn != nil { + return false, errDyn + } if hasExpectedBlocksAsAttributes(resourceb) { return false, nil } @@ -58,85 +60,126 @@ func updateResource(resource *hclwrite.Block) (bool, error) { } func convertRepSpecs(resourceb *hclwrite.Body, diskSizeGB hclwrite.Tokens) error { - var repSpecs []*hclwrite.Body - for { - block := resourceb.FirstMatchingBlock(nRepSpecs, nil) - if block == nil { - break - } - resourceb.RemoveBlock(block) + d, err := convertRepSpecsWithDynamicBlock(resourceb, diskSizeGB) + if err != nil { + return err + } + if d.IsPresent() { + resourceb.RemoveBlock(d.block) + resourceb.SetAttributeRaw(nRepSpecs, d.tokens) + return nil + } + repSpecBlocks := collectBlocks(resourceb, nRepSpecs) + if len(repSpecBlocks) == 0 { + return fmt.Errorf("must have at least one replication_specs") + } + hasVariableShards := hasVariableNumShards(repSpecBlocks) + var resultTokens []hclwrite.Tokens + var resultBodies []*hclwrite.Body + for _, block := range repSpecBlocks { blockb := block.Body() - numShardsVal := 1 // default to 1 if num_shards not present - if numShardsAttr := blockb.GetAttribute(nNumShards); numShardsAttr != nil { - var err error - if numShardsVal, err = hcl.GetAttrInt(numShardsAttr, errNumShards); err != nil { - return err + shardsAttr := blockb.GetAttribute(nNumShards) + blockb.RemoveAttribute(nNumShards) + dConfig, err := getDynamicBlock(blockb, nConfig) + if err != nil { + return err + } + if dConfig.IsPresent() { + transformReferences(dConfig.content.Body(), getResourceName(dConfig.block), nRegion) + copyAttributesSorted(dConfig.content.Body(), dConfig.content.Body().Attributes()) + processAllSpecs(dConfig.content.Body(), diskSizeGB) + tokens := 
hcl.TokensFromExpr(buildForExpr(nRegion, hcl.GetAttrExpr(dConfig.forEach), false)) + tokens = append(tokens, hcl.TokensObject(dConfig.content.Body())...) + blockb.SetAttributeRaw(nConfig, hcl.EncloseBracketsNewLines(tokens)) + blockb.RemoveBlock(dConfig.block) + } else { + var configs []*hclwrite.Body + for _, configBlock := range collectBlocks(blockb, nConfig) { + configBlockb := configBlock.Body() + processAllSpecs(configBlockb, diskSizeGB) + configs = append(configs, configBlockb) } - blockb.RemoveAttribute(nNumShards) + if len(configs) == 0 { + return fmt.Errorf("replication_specs must have at least one region_configs") + } + blockb.SetAttributeRaw(nConfig, hcl.TokensArray(configs)) } - if err := convertConfig(blockb, diskSizeGB); err != nil { - return err + if hasVariableShards { + resultTokens = append(resultTokens, processNumShardsWhenSomeIsVariable(shardsAttr, blockb)) + continue + } + numShardsVal := 1 // Default to 1 if num_shards is not set + if shardsAttr != nil { + numShardsVal, _ = hcl.GetAttrInt(shardsAttr, errNumShards) } for range numShardsVal { - repSpecs = append(repSpecs, blockb) + resultBodies = append(resultBodies, blockb) } } - if len(repSpecs) == 0 { - return fmt.Errorf("must have at least one replication_specs") + if hasVariableShards { + resourceb.SetAttributeRaw(nRepSpecs, hcl.TokensFuncConcat(resultTokens...)) + } else { + resourceb.SetAttributeRaw(nRepSpecs, hcl.TokensArray(resultBodies)) } - resourceb.SetAttributeRaw(nRepSpecs, hcl.TokensArray(repSpecs)) return nil } -func convertConfig(repSpecs *hclwrite.Body, diskSizeGB hclwrite.Tokens) error { - var configs []*hclwrite.Body - for { - block := repSpecs.FirstMatchingBlock(nConfig, nil) - if block == nil { - break - } - repSpecs.RemoveBlock(block) - blockb := block.Body() - fillSpecOpt(blockb, nElectableSpecs, diskSizeGB) - fillSpecOpt(blockb, nReadOnlySpecs, diskSizeGB) - fillSpecOpt(blockb, nAnalyticsSpecs, diskSizeGB) - fillSpecOpt(blockb, nAutoScaling, nil) // auto_scaling doesn't 
need disk_size_gb - fillSpecOpt(blockb, nAnalyticsAutoScaling, nil) // analytics_auto_scaling doesn't need disk_size_gb - configs = append(configs, blockb) +func convertRepSpecsWithDynamicBlock(resourceb *hclwrite.Body, diskSizeGB hclwrite.Tokens) (dynamicBlock, error) { + dSpec, err := getDynamicBlock(resourceb, nRepSpecs) + if err != nil || !dSpec.IsPresent() { + return dynamicBlock{}, err } - if len(configs) == 0 { - return fmt.Errorf("replication_specs must have at least one region_configs") + transformReferences(dSpec.content.Body(), nRepSpecs, nSpec) + dConfig, err := convertConfigsWithDynamicBlock(dSpec.content.Body(), diskSizeGB) + if err != nil { + return dynamicBlock{}, err } - repSpecs.SetAttributeRaw(nConfig, hcl.TokensArray(configs)) - return nil + forSpec := hcl.TokensFromExpr(buildForExpr(nSpec, hcl.GetAttrExpr(dSpec.forEach), true)) + dSpec.tokens = hcl.TokensFuncFlatten(append(forSpec, dConfig.tokens...)) + return dSpec, nil } -func fillSpecOpt(resourceb *hclwrite.Body, name string, diskSizeGBTokens hclwrite.Tokens) { - block := resourceb.FirstMatchingBlock(name, nil) - if block == nil { - return +func convertConfigsWithDynamicBlock(specbSrc *hclwrite.Body, diskSizeGB hclwrite.Tokens) (dynamicBlock, error) { + d, err := getDynamicBlock(specbSrc, nConfig) + if err != nil { + return dynamicBlock{}, err } - if diskSizeGBTokens != nil { - blockb := block.Body() - blockb.RemoveAttribute(nDiskSizeGB) - blockb.SetAttributeRaw(nDiskSizeGB, diskSizeGBTokens) + configBody := d.content.Body() + transformReferences(configBody, getResourceName(d.block), nRegion) + regionConfigBody := hclwrite.NewEmptyFile().Body() + copyAttributesSorted(regionConfigBody, configBody.Attributes()) + for _, block := range configBody.Blocks() { + blockType := block.Type() + blockBody := hclwrite.NewEmptyFile().Body() + copyAttributesSorted(blockBody, block.Body().Attributes()) + if diskSizeGB != nil && + (blockType == nElectableSpecs || blockType == nReadOnlySpecs || blockType == 
nAnalyticsSpecs) { + blockBody.SetAttributeRaw(nDiskSizeGB, diskSizeGB) + } + regionConfigBody.SetAttributeRaw(blockType, hcl.TokensObject(blockBody)) } - fillBlockOpt(resourceb, name) + repSpecb := hclwrite.NewEmptyFile().Body() + if zoneNameAttr := specbSrc.GetAttribute(nZoneName); zoneNameAttr != nil { + repSpecb.SetAttributeRaw(nZoneName, hcl.TokensFromExpr( + transformReference(hcl.GetAttrExpr(zoneNameAttr), nRepSpecs, nSpec))) + } + regionTokens := hcl.TokensFromExpr(buildForExpr(nRegion, fmt.Sprintf("%s.%s", nSpec, nConfig), false)) + regionTokens = append(regionTokens, hcl.TokensObject(regionConfigBody)...) + repSpecb.SetAttributeRaw(nConfig, hcl.EncloseBracketsNewLines(regionTokens)) + if numShardsAttr := specbSrc.GetAttribute(nNumShards); numShardsAttr != nil { + tokens := hcl.TokensFromExpr(buildForExpr("i", + fmt.Sprintf("range(%s)", transformReference(hcl.GetAttrExpr(numShardsAttr), nRepSpecs, nSpec)), false)) + tokens = append(tokens, hcl.TokensObject(repSpecb)...) + return dynamicBlock{tokens: hcl.EncloseBracketsNewLines(tokens)}, nil + } + return dynamicBlock{tokens: hcl.TokensArraySingle(repSpecb)}, nil } // hasExpectedBlocksAsAttributes checks if any of the expected block names // exist as attributes in the resource body. In that case conversion is not done // as advanced cluster is not in a valid SDKv2 configuration. 
func hasExpectedBlocksAsAttributes(resourceb *hclwrite.Body) bool { - expectedBlocks := []string{ - nRepSpecs, - nTags, - nLabels, - nAdvConfig, - nBiConnector, - nPinnedFCV, - nTimeouts, - } + expectedBlocks := []string{nRepSpecs, nTags, nLabels, nAdvConfig, nBiConnector, nPinnedFCV, nTimeouts} for name := range resourceb.Attributes() { if slices.Contains(expectedBlocks, name) { return true @@ -144,3 +187,36 @@ func hasExpectedBlocksAsAttributes(resourceb *hclwrite.Body) bool { } return false } + +func copyAttributesSorted(targetBody *hclwrite.Body, sourceAttrs map[string]*hclwrite.Attribute) { + var names []string + for name := range sourceAttrs { + names = append(names, name) + } + slices.Sort(names) + for _, name := range names { + expr := hcl.GetAttrExpr(sourceAttrs[name]) + targetBody.SetAttributeRaw(name, hcl.TokensFromExpr(expr)) + } +} + +func processAllSpecs(body *hclwrite.Body, diskSizeGB hclwrite.Tokens) { + fillSpecOpt(body, nElectableSpecs, diskSizeGB) + fillSpecOpt(body, nReadOnlySpecs, diskSizeGB) + fillSpecOpt(body, nAnalyticsSpecs, diskSizeGB) + fillSpecOpt(body, nAutoScaling, nil) + fillSpecOpt(body, nAnalyticsAutoScaling, nil) +} + +func fillSpecOpt(resourceb *hclwrite.Body, name string, diskSizeGBTokens hclwrite.Tokens) { + block := resourceb.FirstMatchingBlock(name, nil) + if block == nil { + return + } + if diskSizeGBTokens != nil { + blockb := block.Body() + blockb.RemoveAttribute(nDiskSizeGB) + blockb.SetAttributeRaw(nDiskSizeGB, diskSizeGBTokens) + } + fillBlockOpt(resourceb, name) +} diff --git a/internal/convert/clu2adv.go b/internal/convert/clu2adv.go index 6897f67..7fae2a3 100644 --- a/internal/convert/clu2adv.go +++ b/internal/convert/clu2adv.go @@ -2,7 +2,6 @@ package convert import ( "fmt" - "slices" "sort" "strconv" "strings" @@ -13,10 +12,6 @@ import ( "github.com/zclconf/go-cty/cty" ) -var ( - dynamicBlockAllowList = []string{nTags, nLabels, nConfigSrc, nRepSpecs} -) - type attrVals struct { req map[string]hclwrite.Tokens opt 
map[string]hclwrite.Tokens @@ -67,7 +62,6 @@ func convertResource(block *hclwrite.Block) (bool, error) { if errDyn := checkDynamicBlock(blockb); errDyn != nil { return false, errDyn } - var err error if isFreeTierCluster(blockb) { err = fillFreeTierCluster(blockb) @@ -122,8 +116,7 @@ func fillMovedBlocks(body *hclwrite.Body, moveLabels []string) { // fillFreeTierCluster is the entry point to convert clusters in free tier func fillFreeTierCluster(resourceb *hclwrite.Body) error { resourceb.SetAttributeValue(nClusterType, cty.StringVal(valClusterType)) - config := hclwrite.NewEmptyFile() - configb := config.Body() + configb := hclwrite.NewEmptyFile().Body() hcl.SetAttrInt(configb, nPriority, valMaxPriority) if err := hcl.MoveAttr(resourceb, configb, nRegionNameSrc, nRegionName, errFreeCluster); err != nil { return err @@ -134,15 +127,14 @@ func fillFreeTierCluster(resourceb *hclwrite.Body) error { if err := hcl.MoveAttr(resourceb, configb, nBackingProviderName, nBackingProviderName, errFreeCluster); err != nil { return err } - electableSpec := hclwrite.NewEmptyFile() - if err := hcl.MoveAttr(resourceb, electableSpec.Body(), nInstanceSizeSrc, nInstanceSize, errFreeCluster); err != nil { + electableSpecb := hclwrite.NewEmptyFile().Body() + if err := hcl.MoveAttr(resourceb, electableSpecb, nInstanceSizeSrc, nInstanceSize, errFreeCluster); err != nil { return err } - configb.SetAttributeRaw(nElectableSpecs, hcl.TokensObject(electableSpec.Body())) - - repSpecs := hclwrite.NewEmptyFile() - repSpecs.Body().SetAttributeRaw(nConfig, hcl.TokensArraySingle(configb)) - resourceb.SetAttributeRaw(nRepSpecs, hcl.TokensArraySingle(repSpecs.Body())) + configb.SetAttributeRaw(nElectableSpecs, hcl.TokensObject(electableSpecb)) + repSpecsb := hclwrite.NewEmptyFile().Body() + repSpecsb.SetAttributeRaw(nConfig, hcl.TokensArraySingle(configb)) + resourceb.SetAttributeRaw(nRepSpecs, hcl.TokensArraySingle(repSpecsb)) return nil } @@ -155,7 +147,7 @@ func fillCluster(resourceb *hclwrite.Body) 
error { resourceb.RemoveAttribute(nNumShards) // num_shards in root is not relevant, only in replication_specs // ok to fail as cloud_backup is optional _ = hcl.MoveAttr(resourceb, resourceb, nCloudBackup, nBackupEnabled, errRepSpecs) - if err := fillReplicationSpecs(resourceb, root); err != nil { + if err := fillRepSpecs(resourceb, root); err != nil { return err } if err := fillTagsLabelsOpt(resourceb, nTags); err != nil { @@ -171,8 +163,8 @@ func fillCluster(resourceb *hclwrite.Body) error { return nil } -func fillReplicationSpecs(resourceb *hclwrite.Body, root attrVals) error { - d, err := fillReplicationSpecsWithDynamicBlock(resourceb, root) +func fillRepSpecs(resourceb *hclwrite.Body, root attrVals) error { + d, err := fillRepSpecsWithDynamicBlock(resourceb, root) if err != nil { return err } @@ -181,191 +173,99 @@ func fillReplicationSpecs(resourceb *hclwrite.Body, root attrVals) error { resourceb.SetAttributeRaw(nRepSpecs, d.tokens) return nil } - // at least one replication_specs exists here, if not it would be a free tier cluster - var specbs []*hclwrite.Body - for { - var ( - specSrc = resourceb.FirstMatchingBlock(nRepSpecs, nil) - spec = hclwrite.NewEmptyFile() - specb = spec.Body() - ) - if specSrc == nil { - break - } - specbSrc := specSrc.Body() - d, err := fillWithDynamicRegionConfigs(specbSrc, root, false) - if err != nil { - return err - } - if d.IsPresent() { - resourceb.RemoveBlock(specSrc) - resourceb.SetAttributeRaw(nRepSpecs, d.tokens) - return nil - } - // ok to fail as zone_name is optional + repSpecBlocks := collectBlocks(resourceb, nRepSpecs) + if len(repSpecBlocks) == 0 { + return fmt.Errorf("must have at least one replication_specs") + } + dConfig, err := fillConfigsWithDynamicRegion(repSpecBlocks[0].Body(), root, false) + if err != nil { + return err + } + if dConfig.IsPresent() { + resourceb.SetAttributeRaw(nRepSpecs, dConfig.tokens) + return nil + } + hasVariableShards := hasVariableNumShards(repSpecBlocks) + var resultTokens 
[]hclwrite.Tokens + var resultBodies []*hclwrite.Body + for _, block := range repSpecBlocks { + specb := hclwrite.NewEmptyFile().Body() + specbSrc := block.Body() _ = hcl.MoveAttr(specbSrc, specb, nZoneName, nZoneName, errRepSpecs) - shards := specbSrc.GetAttribute(nNumShards) - if shards == nil { + shardsAttr := specbSrc.GetAttribute(nNumShards) + if shardsAttr == nil { return fmt.Errorf("%s: %s not found", errRepSpecs, nNumShards) } - shardsVal, err := hcl.GetAttrInt(shards, errNumShards) - if err != nil { - return err + if errConfig := fillRegionConfigs(specb, specbSrc, root); errConfig != nil { + return errConfig + } + if hasVariableShards { + resultTokens = append(resultTokens, processNumShardsWhenSomeIsVariable(shardsAttr, specb)) + continue } - if err := fillRegionConfigs(specb, specbSrc, root); err != nil { + shardsVal, err := hcl.GetAttrInt(shardsAttr, errNumShards) + if err != nil { return err } for range shardsVal { - specbs = append(specbs, specb) + resultBodies = append(resultBodies, specb) } - resourceb.RemoveBlock(specSrc) - } - resourceb.SetAttributeRaw(nRepSpecs, hcl.TokensArray(specbs)) - return nil -} - -func fillTagsLabelsOpt(resourceb *hclwrite.Body, name string) error { - tokensDynamic, err := extractTagsLabelsDynamicBlock(resourceb, name) - if err != nil { - return err - } - tokensIndividual, err := extractTagsLabelsIndividual(resourceb, name) - if err != nil { - return err - } - if tokensDynamic != nil && tokensIndividual != nil { - resourceb.SetAttributeRaw(name, hcl.TokensFuncMerge(tokensDynamic, tokensIndividual)) - return nil } - if tokensDynamic != nil { - resourceb.SetAttributeRaw(name, tokensDynamic) - } - if tokensIndividual != nil { - resourceb.SetAttributeRaw(name, tokensIndividual) + if hasVariableShards { + resourceb.SetAttributeRaw(nRepSpecs, hcl.TokensFuncConcat(resultTokens...)) + } else { + resourceb.SetAttributeRaw(nRepSpecs, hcl.TokensArray(resultBodies)) } return nil } -func extractTagsLabelsDynamicBlock(resourceb 
*hclwrite.Body, name string) (hclwrite.Tokens, error) { - d, err := getDynamicBlock(resourceb, name) - if err != nil || !d.IsPresent() { - return nil, err - } - key := d.content.Body().GetAttribute(nKey) - value := d.content.Body().GetAttribute(nValue) - if key == nil || value == nil { - return nil, fmt.Errorf("dynamic block %s: %s or %s not found", name, nKey, nValue) - } - keyExpr := replaceDynamicBlockExpr(key, name, nKey) - valueExpr := replaceDynamicBlockExpr(value, name, nValue) - collectionExpr := hcl.GetAttrExpr(d.forEach) - forExpr := fmt.Sprintf("for key, value in %s : %s => %s", collectionExpr, keyExpr, valueExpr) - tokens := hcl.TokensObjectFromExpr(forExpr) - if keyExpr == nKey && valueExpr == nValue { // expression can be simplified and use for_each expression - tokens = hcl.TokensFromExpr(collectionExpr) - } - resourceb.RemoveBlock(d.block) - return tokens, nil -} - -func extractTagsLabelsIndividual(resourceb *hclwrite.Body, name string) (hclwrite.Tokens, error) { - var ( - file = hclwrite.NewEmptyFile() - fileb = file.Body() - found = false - ) - for { - block := resourceb.FirstMatchingBlock(name, nil) - if block == nil { - break - } - key := block.Body().GetAttribute(nKey) - value := block.Body().GetAttribute(nValue) - if key == nil || value == nil { - return nil, fmt.Errorf("%s: %s or %s not found", name, nKey, nValue) - } - setKeyValue(fileb, key, value) - resourceb.RemoveBlock(block) - found = true - } - if !found { - return nil, nil - } - return hcl.TokensObject(fileb), nil -} - -func fillBlockOpt(resourceb *hclwrite.Body, name string) { - block := resourceb.FirstMatchingBlock(name, nil) - if block == nil { - return - } - resourceb.RemoveBlock(block) - resourceb.SetAttributeRaw(name, hcl.TokensObject(block.Body())) -} - -func fillAdvConfigOpt(resourceb *hclwrite.Body) { - block := resourceb.FirstMatchingBlock(nAdvConfig, nil) - if block == nil { - return - } - blockBody := block.Body() - - // Remove deprecated attributes from 
advanced_configuration - blockBody.RemoveAttribute(nFailIndexKeyTooLong) - blockBody.RemoveAttribute(nDefaultReadConcern) - - fillBlockOpt(resourceb, nAdvConfig) -} - -// fillReplicationSpecsWithDynamicBlock used for dynamic blocks in replication_specs -func fillReplicationSpecsWithDynamicBlock(resourceb *hclwrite.Body, root attrVals) (dynamicBlock, error) { +// fillRepSpecsWithDynamicBlock used for dynamic blocks in replication_specs +func fillRepSpecsWithDynamicBlock(resourceb *hclwrite.Body, root attrVals) (dynamicBlock, error) { dSpec, err := getDynamicBlock(resourceb, nRepSpecs) if err != nil || !dSpec.IsPresent() { return dynamicBlock{}, err } - transformDynamicBlockReferences(dSpec.content.Body(), nRepSpecs, nSpec) - dConfig, err := fillWithDynamicRegionConfigs(dSpec.content.Body(), root, true) + transformReferences(dSpec.content.Body(), nRepSpecs, nSpec) + dConfig, err := fillConfigsWithDynamicRegion(dSpec.content.Body(), root, true) if err != nil { return dynamicBlock{}, err } - forSpec := hcl.TokensFromExpr(fmt.Sprintf("for %s in %s : ", nSpec, hcl.GetAttrExpr(dSpec.forEach))) + forSpec := hcl.TokensFromExpr(buildForExpr(nSpec, hcl.GetAttrExpr(dSpec.forEach), true)) forSpec = append(forSpec, dConfig.tokens...) 
tokens := hcl.TokensFuncFlatten(forSpec) dSpec.tokens = tokens return dSpec, nil } -// fillWithDynamicRegionConfigs is used for dynamic blocks in region_configs -func fillWithDynamicRegionConfigs(specbSrc *hclwrite.Body, root attrVals, changeReferences bool) (dynamicBlock, error) { +// fillConfigsWithDynamicRegion is used for dynamic blocks in region_configs +func fillConfigsWithDynamicRegion(specbSrc *hclwrite.Body, root attrVals, changeReferences bool) (dynamicBlock, error) { d, err := getDynamicBlock(specbSrc, nConfigSrc) if err != nil || !d.IsPresent() { return dynamicBlock{}, err } - repSpec := hclwrite.NewEmptyFile() - repSpecb := repSpec.Body() + repSpecb := hclwrite.NewEmptyFile().Body() if zoneName := hcl.GetAttrExpr(specbSrc.GetAttribute(nZoneName)); zoneName != "" { repSpecb.SetAttributeRaw(nZoneName, hcl.TokensFromExpr(zoneName)) } forEach := hcl.GetAttrExpr(d.forEach) if changeReferences { - forEach = replaceDynamicBlockReferences(forEach, nRepSpecs, nSpec) + forEach = transformReference(forEach, nRepSpecs, nSpec) } regionFor, err := getDynamicBlockRegionArray(forEach, d.content, root) if err != nil { return dynamicBlock{}, err } - priorityForStr := fmt.Sprintf("for %s in range(%d, %d, -1) : ", nPriority, valMaxPriority, valMinPriority) + priorityForStr := buildForExpr(nPriority, fmt.Sprintf("range(%d, %d, -1)", valMaxPriority, valMinPriority), true) priorityFor := hcl.TokensComment(commentPriorityFor) priorityFor = append(priorityFor, hcl.TokensFromExpr(priorityForStr)...) priorityFor = append(priorityFor, regionFor...) repSpecb.SetAttributeRaw(nConfig, hcl.TokensFuncFlatten(priorityFor)) - shards := specbSrc.GetAttribute(nNumShards) if shards == nil { return dynamicBlock{}, fmt.Errorf("%s: %s not found", errRepSpecs, nNumShards) } - tokens := hcl.TokensFromExpr(fmt.Sprintf("for i in range(%s) :", hcl.GetAttrExpr(shards))) - tokens = append(tokens, hcl.EncloseBraces(repSpec.BuildTokens(nil), true)...) 
+ tokens := hcl.TokensFromExpr(buildForExpr("i", fmt.Sprintf("range(%s)", hcl.GetAttrExpr(shards)), false)) + tokens = append(tokens, hcl.EncloseBraces(repSpecb.BuildTokens(nil), true)...) d.tokens = hcl.EncloseBracketsNewLines(tokens) return d, nil } @@ -381,7 +281,7 @@ func fillRegionConfigs(specb, specbSrc *hclwrite.Body, root attrVals) error { if err != nil { return err } - configs = append(configs, config.Body()) + configs = append(configs, config) specbSrc.RemoveBlock(configSrc) } if len(configs) == 0 { @@ -392,9 +292,8 @@ func fillRegionConfigs(specb, specbSrc *hclwrite.Body, root attrVals) error { return nil } -func getRegionConfig(configSrc *hclwrite.Block, root attrVals, isDynamicBlock bool) (*hclwrite.File, error) { - file := hclwrite.NewEmptyFile() - fileb := file.Body() +func getRegionConfig(configSrc *hclwrite.Block, root attrVals, isDynamicBlock bool) (*hclwrite.Body, error) { + fileb := hclwrite.NewEmptyFile().Body() fileb.SetAttributeRaw(nProviderName, root.req[nProviderName]) if err := hcl.MoveAttr(configSrc.Body(), fileb, nRegionName, nRegionName, errRepSpecs); err != nil { return nil, err @@ -414,13 +313,12 @@ func getRegionConfig(configSrc *hclwrite.Block, root attrVals, isDynamicBlock bo if autoScaling := getAutoScalingOpt(root.opt); autoScaling != nil { fileb.SetAttributeRaw(nAutoScaling, autoScaling) } - return file, nil + return fileb, nil } func getSpec(configSrc *hclwrite.Block, countName string, root attrVals, isDynamicBlock bool) (hclwrite.Tokens, error) { var ( - file = hclwrite.NewEmptyFile() - fileb = file.Body() + fileb = hclwrite.NewEmptyFile().Body() count = configSrc.Body().GetAttribute(countName) ) if count == nil { @@ -456,8 +354,7 @@ func getAutoScalingOpt(opt map[string]hclwrite.Tokens) hclwrite.Tokens { {nComputeMaxInstanceSizeSrc, nComputeMaxInstanceSize}, {nComputeScaleDownEnabledSrc, nComputeScaleDownEnabled}, } - file = hclwrite.NewEmptyFile() - fileb = file.Body() + fileb = hclwrite.NewEmptyFile().Body() found = false ) 
for _, tuple := range names { @@ -482,16 +379,6 @@ func setResourceName(resource *hclwrite.Block, name string) { resource.SetLabels(labels) } -// getResourceName returns the first label of a block, if it exists. -// e.g. in resource "mongodbatlas_cluster" "mycluster", the first label is "mongodbatlas_cluster". -func getResourceName(resource *hclwrite.Block) string { - labels := resource.Labels() - if len(labels) == 0 { - return "" - } - return labels[0] -} - // getResourceLabel returns the second label of a block, if it exists. // e.g. in resource "mongodbatlas_cluster" "mycluster", the second label is "mycluster". func getResourceLabel(resource *hclwrite.Block) string { @@ -502,47 +389,6 @@ func getResourceLabel(resource *hclwrite.Block) string { return labels[1] } -type dynamicBlock struct { - block *hclwrite.Block - forEach *hclwrite.Attribute - content *hclwrite.Block - tokens hclwrite.Tokens -} - -func (d dynamicBlock) IsPresent() bool { - return d.block != nil -} - -func checkDynamicBlock(body *hclwrite.Body) error { - for _, block := range body.Blocks() { - name := getResourceName(block) - if block.Type() != nDynamic || slices.Contains(dynamicBlockAllowList, name) { - continue - } - return fmt.Errorf("dynamic blocks are not supported for %s", name) - } - return nil -} - -func getDynamicBlock(body *hclwrite.Body, name string) (dynamicBlock, error) { - for _, block := range body.Blocks() { - if block.Type() != nDynamic || name != getResourceName(block) { - continue - } - blockb := block.Body() - forEach := blockb.GetAttribute(nForEach) - if forEach == nil { - return dynamicBlock{}, fmt.Errorf("dynamic block %s: attribute %s not found", name, nForEach) - } - content := blockb.FirstMatchingBlock(nContent, nil) - if content == nil { - return dynamicBlock{}, fmt.Errorf("dynamic block %s: block %s not found", name, nContent) - } - return dynamicBlock{forEach: forEach, block: block, content: content}, nil - } - return dynamicBlock{}, nil -} - func 
replaceDynamicBlockExpr(attr *hclwrite.Attribute, blockName, attrName string) string { expr := hcl.GetAttrExpr(attr) return strings.ReplaceAll(expr, fmt.Sprintf("%s.%s", blockName, attrName), attrName) @@ -551,7 +397,7 @@ func replaceDynamicBlockExpr(attr *hclwrite.Attribute, blockName, attrName strin // getDynamicBlockRegionArray returns the region array for a dynamic block in replication_specs. // e.g. [ for region in var.replication_specs.regions_config : { ... } if priority == region.priority ] func getDynamicBlockRegionArray(forEach string, configSrc *hclwrite.Block, root attrVals) (hclwrite.Tokens, error) { - transformDynamicBlockReferences(configSrc.Body(), nConfigSrc, nRegion) + transformReferences(configSrc.Body(), nConfigSrc, nRegion) priorityStr := hcl.GetAttrExpr(configSrc.Body().GetAttribute(nPriority)) if priorityStr == "" { return nil, fmt.Errorf("%s: %s not found", errRepSpecs, nPriority) @@ -560,27 +406,12 @@ func getDynamicBlockRegionArray(forEach string, configSrc *hclwrite.Block, root if err != nil { return nil, err } - tokens := hcl.TokensFromExpr(fmt.Sprintf("for %s in %s :", nRegion, forEach)) + tokens := hcl.TokensFromExpr(buildForExpr(nRegion, forEach, false)) tokens = append(tokens, hcl.EncloseBraces(region.BuildTokens(nil), true)...) tokens = append(tokens, hcl.TokensFromExpr(fmt.Sprintf("if %s == %s", nPriority, priorityStr))...) return hcl.EncloseBracketsNewLines(tokens), nil } -func transformDynamicBlockReferences(configSrcb *hclwrite.Body, blockName, varName string) { - for name, attr := range configSrcb.Attributes() { - expr := replaceDynamicBlockReferences(hcl.GetAttrExpr(attr), blockName, varName) - configSrcb.SetAttributeRaw(name, hcl.TokensFromExpr(expr)) - } -} - -// replaceDynamicBlockReferences changes value references, -// e.g. 
regions_config.value.electable_nodes to region.electable_nodes -func replaceDynamicBlockReferences(expr, blockName, varName string) string { - return strings.ReplaceAll(expr, - fmt.Sprintf("%s.%s.", blockName, nValue), - fmt.Sprintf("%s.", varName)) -} - func sortConfigsByPriority(configs []*hclwrite.Body) []*hclwrite.Body { for _, config := range configs { if _, err := hcl.GetAttrInt(config.GetAttribute(nPriority), errPriority); err != nil { @@ -596,7 +427,7 @@ func sortConfigsByPriority(configs []*hclwrite.Body) []*hclwrite.Body { } func setKeyValue(body *hclwrite.Body, key, value *hclwrite.Attribute) { - keyStr, err := hcl.GetAttrString(key, "") + keyStr, err := hcl.GetAttrString(key) if err == nil { if !hclsyntax.ValidIdentifier(keyStr) { // wrap in quotes so invalid identifiers (e.g. with blanks) can be used as attribute names diff --git a/internal/convert/shared.go b/internal/convert/shared.go new file mode 100644 index 0000000..9d11e77 --- /dev/null +++ b/internal/convert/shared.go @@ -0,0 +1,222 @@ +package convert + +import ( + "fmt" + "slices" + "strings" + + "github.com/hashicorp/hcl/v2/hclwrite" + "github.com/mongodb-labs/atlas-cli-plugin-terraform/internal/hcl" +) + +// hasVariableNumShards checks if any block has a variable (non-literal) num_shards attribute +func hasVariableNumShards(blocks []*hclwrite.Block) bool { + for _, block := range blocks { + if shardsAttr := block.Body().GetAttribute(nNumShards); shardsAttr != nil { + if _, err := hcl.GetAttrInt(shardsAttr, errNumShards); err != nil { + return true + } + } + } + return false +} + +// processNumShardsWhenSomeIsVariable handles num_shards when some replication_specs have variable num_shards +func processNumShardsWhenSomeIsVariable(shardsAttr *hclwrite.Attribute, processedBody *hclwrite.Body) hclwrite.Tokens { + if shardsAttr == nil { + return hcl.TokensArraySingle(processedBody) // Default 1 if no num_shards specified + } + if shardsVal, err := hcl.GetAttrInt(shardsAttr, errNumShards); err == 
nil { + var bodies []*hclwrite.Body + for range shardsVal { + bodies = append(bodies, processedBody) + } + return hcl.TokensArray(bodies) + } + shardsExpr := hcl.GetAttrExpr(shardsAttr) + tokens := hcl.TokensFromExpr(buildForExpr("i", fmt.Sprintf("range(%s)", shardsExpr), false)) + tokens = append(tokens, hcl.TokensObject(processedBody)...) + return hcl.EncloseBracketsNewLines(tokens) +} + +type dynamicBlock struct { + block *hclwrite.Block + forEach *hclwrite.Attribute + content *hclwrite.Block + tokens hclwrite.Tokens +} + +func (d dynamicBlock) IsPresent() bool { + return d.block != nil +} + +// getDynamicBlock finds and returns a dynamic block with the given name from the body +func getDynamicBlock(body *hclwrite.Body, name string) (dynamicBlock, error) { + for _, block := range body.Blocks() { + if block.Type() != nDynamic || name != getResourceName(block) { + continue + } + blockb := block.Body() + forEach := blockb.GetAttribute(nForEach) + if forEach == nil { + return dynamicBlock{}, fmt.Errorf("dynamic block %s: attribute %s not found", name, nForEach) + } + content := blockb.FirstMatchingBlock(nContent, nil) + if content == nil { + return dynamicBlock{}, fmt.Errorf("dynamic block %s: block %s not found", name, nContent) + } + return dynamicBlock{forEach: forEach, block: block, content: content}, nil + } + return dynamicBlock{}, nil +} + +func checkDynamicBlock(body *hclwrite.Body) error { + dynamicBlockAllowList := []string{nTags, nLabels, nRepSpecs} + for _, block := range body.Blocks() { + name := getResourceName(block) + if block.Type() != nDynamic || slices.Contains(dynamicBlockAllowList, name) { + continue + } + return fmt.Errorf("dynamic blocks are not supported for %s", name) + } + return nil +} + +// getResourceName returns the first label of a block, if it exists. +// e.g. in resource "mongodbatlas_cluster" "mycluster", the first label is "mongodbatlas_cluster". 
+func getResourceName(resource *hclwrite.Block) string {
+	labels := resource.Labels()
+	if len(labels) == 0 {
+		return ""
+	}
+	return labels[0]
+}
+
+// transformReference changes value references,
+// e.g. regions_config.value.electable_nodes to region.electable_nodes
+func transformReference(expr, blockName, varName string) string {
+	return strings.ReplaceAll(expr,
+		fmt.Sprintf("%s.%s.", blockName, nValue),
+		fmt.Sprintf("%s.", varName))
+}
+
+// transformReferences rewrites all attribute references in a body, recursing into nested blocks, from the dynamic block form (blockName.value.attr) to the loop variable form (varName.attr)
+func transformReferences(body *hclwrite.Body, blockName, varName string) {
+	for name, attr := range body.Attributes() {
+		expr := transformReference(hcl.GetAttrExpr(attr), blockName, varName)
+		body.SetAttributeRaw(name, hcl.TokensFromExpr(expr))
+	}
+	for _, block := range body.Blocks() {
+		transformReferences(block.Body(), blockName, varName)
+	}
+}
+
+// collectBlocks removes and returns all blocks of the given name from body in order of appearance.
+func collectBlocks(body *hclwrite.Body, name string) []*hclwrite.Block { + var blocks []*hclwrite.Block + for _, block := range body.Blocks() { + if block.Type() == name { + blocks = append(blocks, block) + body.RemoveBlock(block) + } + } + return blocks +} + +// fillBlockOpt converts a block to an attribute with object value +func fillBlockOpt(resourceb *hclwrite.Body, name string) { + block := resourceb.FirstMatchingBlock(name, nil) + if block == nil { + return + } + resourceb.RemoveBlock(block) + resourceb.SetAttributeRaw(name, hcl.TokensObject(block.Body())) +} + +// fillAdvConfigOpt fills the advanced_configuration attribute, removing deprecated attributes +func fillAdvConfigOpt(resourceb *hclwrite.Body) { + block := resourceb.FirstMatchingBlock(nAdvConfig, nil) + if block == nil { + return + } + blockBody := block.Body() + + // Remove deprecated attributes from advanced_configuration + blockBody.RemoveAttribute(nFailIndexKeyTooLong) + blockBody.RemoveAttribute(nDefaultReadConcern) + + fillBlockOpt(resourceb, nAdvConfig) +} + +// buildForExpr builds a for expression with the given variable and collection +func buildForExpr(varName, collection string, trailingSpace bool) string { + expr := fmt.Sprintf("for %s in %s :", varName, collection) + if trailingSpace { + expr += " " + } + return expr +} + +func fillTagsLabelsOpt(resourceb *hclwrite.Body, name string) error { + tokensDynamic, err := extractTagsLabelsDynamicBlock(resourceb, name) + if err != nil { + return err + } + tokensIndividual, err := extractTagsLabelsIndividual(resourceb, name) + if err != nil { + return err + } + if tokensDynamic != nil && tokensIndividual != nil { + resourceb.SetAttributeRaw(name, hcl.TokensFuncMerge(tokensDynamic, tokensIndividual)) + return nil + } + if tokensDynamic != nil { + resourceb.SetAttributeRaw(name, tokensDynamic) + } + if tokensIndividual != nil { + resourceb.SetAttributeRaw(name, tokensIndividual) + } + return nil +} + +func extractTagsLabelsDynamicBlock(resourceb 
*hclwrite.Body, name string) (hclwrite.Tokens, error) { + d, err := getDynamicBlock(resourceb, name) + if err != nil || !d.IsPresent() { + return nil, err + } + key := d.content.Body().GetAttribute(nKey) + value := d.content.Body().GetAttribute(nValue) + if key == nil || value == nil { + return nil, fmt.Errorf("dynamic block %s: %s or %s not found", name, nKey, nValue) + } + keyExpr := replaceDynamicBlockExpr(key, name, nKey) + valueExpr := replaceDynamicBlockExpr(value, name, nValue) + collectionExpr := hcl.GetAttrExpr(d.forEach) + forExpr := fmt.Sprintf("for key, value in %s : %s => %s", collectionExpr, keyExpr, valueExpr) + tokens := hcl.EncloseBraces(hcl.EncloseNewLines(hcl.TokensFromExpr(forExpr)), false) + if keyExpr == nKey && valueExpr == nValue { // expression can be simplified and use for_each expression + tokens = hcl.TokensFromExpr(collectionExpr) + } + resourceb.RemoveBlock(d.block) + return tokens, nil +} + +func extractTagsLabelsIndividual(resourceb *hclwrite.Body, name string) (hclwrite.Tokens, error) { + var ( + fileb = hclwrite.NewEmptyFile().Body() + ) + blocks := collectBlocks(resourceb, name) + if len(blocks) == 0 { + return nil, nil + } + for _, block := range blocks { + key := block.Body().GetAttribute(nKey) + value := block.Body().GetAttribute(nValue) + if key == nil || value == nil { + return nil, fmt.Errorf("%s: %s or %s not found", name, nKey, nValue) + } + setKeyValue(fileb, key, value) + resourceb.RemoveBlock(block) + } + return hcl.TokensObject(fileb), nil +} diff --git a/internal/convert/testdata/adv2v2/dynamic_region_configs.in.tf b/internal/convert/testdata/adv2v2/dynamic_region_configs.in.tf new file mode 100644 index 0000000..ddf9ba0 --- /dev/null +++ b/internal/convert/testdata/adv2v2/dynamic_region_configs.in.tf @@ -0,0 +1,123 @@ +resource "mongodbatlas_advanced_cluster" "dynamic_regions_config" { + project_id = var.project_id + name = "cluster" + cluster_type = "SHARDED" + replication_specs { + num_shards = 
var.replication_specs.num_shards + zone_name = var.zone_name + dynamic "region_configs" { + for_each = var.replication_specs.region_configs + content { + priority = region_configs.value.prio + provider_name = "AWS" + region_name = region_configs.value.region_name + electable_specs { + instance_size = region_configs.value.instance_size + node_count = region_configs.value.node_count + } + } + } + } +} + +resource "mongodbatlas_advanced_cluster" "using_disk_size_gb" { + project_id = var.project_id + name = "cluster" + cluster_type = "SHARDED" + disk_size_gb = 123 + replication_specs { + num_shards = var.replication_specs.num_shards + zone_name = var.zone_name + dynamic "region_configs" { + for_each = var.replication_specs.region_configs + content { + priority = region_configs.value.prio + provider_name = "AWS" + region_name = region_configs.value.region_name + electable_specs { + instance_size = region_configs.value.instance_size + node_count = region_configs.value.node_count + } + } + } + } +} + +resource "mongodbatlas_advanced_cluster" "all_specs" { + project_id = var.project_id + name = "cluster" + cluster_type = "SHARDED" + disk_size_gb = 123 + replication_specs { + num_shards = var.replication_specs.num_shards + zone_name = var.zone_name + dynamic "region_configs" { + for_each = var.replication_specs.region_configs + content { + priority = region_configs.value.prio + provider_name = "AWS" + region_name = region_configs.value.region_name + electable_specs { + instance_size = region_configs.value.instance_size + node_count = region_configs.value.node_count + } + read_only_specs { + instance_size = region_configs.value.instance_size + node_count = region_configs.value.node_count_read_only + } + analytics_specs { + instance_size = region_configs.value.instance_size + node_count = region_configs.value.node_count_analytics + } + auto_scaling { + disk_gb_enabled = region_configs.value.enable_disk_gb + } + analytics_auto_scaling { + compute_enabled = 
region_configs.value.enable_compute + } + } + } + } +} + +# example of variable for demostration purposes, not used in the conversion +variable "replication_specs" { + type = object({ + num_shards = number + region_configs = list(object({ + prio = number + region_name = string + instance_size = string + node_count = number + node_count_read_only = number + node_count_analytics = number + enable_disk_gb = bool + enable_compute = bool + })) + }) + default = { + num_shards = 3 + region_configs = [ + { + prio = 7 + region_name = "US_EAST_1" + instance_size = "M10" + node_count = 2 + node_count_read_only = 1 + node_count_analytics = 0 + enable_disk_gb = true + enable_compute = false + }, + { + prio = 6 + region_name = "US_WEST_2" + instance_size = "M10" + node_count = 1 + node_count_read_only = 0 + node_count_analytics = 1 + enable_disk_gb = false + enable_compute = true + } + ] + } +} diff --git a/internal/convert/testdata/adv2v2/dynamic_region_configs.out.tf b/internal/convert/testdata/adv2v2/dynamic_region_configs.out.tf new file mode 100644 index 0000000..0027791 --- /dev/null +++ b/internal/convert/testdata/adv2v2/dynamic_region_configs.out.tf @@ -0,0 +1,131 @@ +resource "mongodbatlas_advanced_cluster" "dynamic_regions_config" { + project_id = var.project_id + name = "cluster" + cluster_type = "SHARDED" + replication_specs = [ + for i in range(var.replication_specs.num_shards) : { + zone_name = var.zone_name + region_configs = [ + for region in var.replication_specs.region_configs : { + priority = region.prio + provider_name = "AWS" + region_name = region.region_name + electable_specs = { + instance_size = region.instance_size + node_count = region.node_count + } + } + ] + } + ] + + # Updated by atlas-cli-plugin-terraform, please review the changes. 
+} + +resource "mongodbatlas_advanced_cluster" "using_disk_size_gb" { + project_id = var.project_id + name = "cluster" + cluster_type = "SHARDED" + replication_specs = [ + for i in range(var.replication_specs.num_shards) : { + zone_name = var.zone_name + region_configs = [ + for region in var.replication_specs.region_configs : { + priority = region.prio + provider_name = "AWS" + region_name = region.region_name + electable_specs = { + instance_size = region.instance_size + node_count = region.node_count + disk_size_gb = 123 + } + } + ] + } + ] + + # Updated by atlas-cli-plugin-terraform, please review the changes. +} + +resource "mongodbatlas_advanced_cluster" "all_specs" { + project_id = var.project_id + name = "cluster" + cluster_type = "SHARDED" + replication_specs = [ + for i in range(var.replication_specs.num_shards) : { + zone_name = var.zone_name + region_configs = [ + for region in var.replication_specs.region_configs : { + priority = region.prio + provider_name = "AWS" + region_name = region.region_name + electable_specs = { + instance_size = region.instance_size + node_count = region.node_count + disk_size_gb = 123 + } + read_only_specs = { + instance_size = region.instance_size + node_count = region.node_count_read_only + disk_size_gb = 123 + } + analytics_specs = { + instance_size = region.instance_size + node_count = region.node_count_analytics + disk_size_gb = 123 + } + auto_scaling = { + disk_gb_enabled = region.enable_disk_gb + } + analytics_auto_scaling = { + compute_enabled = region.enable_compute + } + } + ] + } + ] + + # Updated by atlas-cli-plugin-terraform, please review the changes. 
+} + +# example of variable for demostration purposes, not used in the conversion +variable "replication_specs" { + type = object({ + num_shards = number + region_configs = list(object({ + prio = number + region_name = string + instance_size = string + node_count = number + node_count_read_only = number + node_count_analytics = number + enable_disk_gb = bool + enable_compute = bool + })) + }) + default = { + num_shards = 3 + region_configs = [ + { + prio = 7 + region_name = "US_EAST_1" + instance_size = "M10" + node_count = 2 + node_count_read_only = 1 + node_count_analytics = 0 + enable_disk_gb = true + enable_compute = false + }, + { + prio = 6 + region_name = "US_WEST_2" + instance_size = "M10" + node_count = 1 + node_count_read_only = 0 + node_count_analytics = 1 + enable_disk_gb = false + enable_compute = true + } + ] + } +} diff --git a/internal/convert/testdata/adv2v2/dynamic_replication_specs.in.tf b/internal/convert/testdata/adv2v2/dynamic_replication_specs.in.tf new file mode 100644 index 0000000..6823e4b --- /dev/null +++ b/internal/convert/testdata/adv2v2/dynamic_replication_specs.in.tf @@ -0,0 +1,154 @@ +resource "mongodbatlas_advanced_cluster" "dynamic_replication_specs" { + lifecycle { + precondition { + condition = !(var.auto_scaling_disk_gb_enabled && var.disk_size > 0) + error_message = "Must use either auto_scaling_disk_gb_enabled or disk_size, not both." 
+ } + } + + project_id = var.project_id + name = var.cluster_name + cluster_type = "GEOSHARDED" + + dynamic "tags" { + for_each = var.tags + content { + key = tags.key + value = tags.value + } + } + tags { + key = "Tag 2" + value = "Value 2" + } + + dynamic "replication_specs" { + for_each = var.replication_specs + content { + num_shards = replication_specs.value.num_shards + zone_name = replication_specs.value.zone_name + dynamic "region_configs" { + for_each = replication_specs.value.region_configs + content { + priority = region_configs.value.priority + provider_name = region_configs.value.provider_name + region_name = region_configs.value.region_name + electable_specs { + instance_size = region_configs.value.instance_size + node_count = region_configs.value.electable_node_count + } + read_only_specs { + instance_size = region_configs.value.instance_size + node_count = region_configs.value.read_only_node_count + } + } + } + } + } +} + +resource "mongodbatlas_advanced_cluster" "all_specs" { + project_id = var.project_id + name = var.cluster_name + cluster_type = "GEOSHARDED" + disk_size_gb = 123 + + dynamic "replication_specs" { + for_each = var.replication_specs + content { + num_shards = replication_specs.value.num_shards + zone_name = replication_specs.value.zone_name + dynamic "region_configs" { + for_each = replication_specs.value.region_configs + content { + priority = region_configs.value.priority + provider_name = region_configs.value.provider_name + region_name = region_configs.value.region_name + electable_specs { + instance_size = region_configs.value.instance_size + node_count = region_configs.value.electable_node_count + } + read_only_specs { + instance_size = region_configs.value.instance_size + node_count = region_configs.value.read_only_node_count + } + analytics_specs { + instance_size = region_configs.value.instance_size + node_count = region_configs.value.analytics_node_count + } + auto_scaling { + disk_gb_enabled = 
region_configs.value.enable_disk_gb + } + analytics_auto_scaling { + compute_enabled = region_configs.value.enable_compute + } + } + } + } + } +} + +# example of variable for demostration purposes, not used in the conversion +variable "replication_specs" { + description = "List of replication specifications in mongodbatlas_advanced_cluster format" + type = list(object({ + num_shards = number + zone_name = string + region_configs = list(object({ + provider_name = string + region_name = string + instance_size = string + electable_node_count = number + read_only_node_count = number + analytics_node_count = number + enable_disk_gb = bool + enable_compute = bool + priority = number + })) + })) + default = [ + { + num_shards = 1 + zone_name = "Zone A" + region_configs = [ + { + provider_name = "AWS" + region_name = "US_EAST_1" + instance_size = "M10" + electable_node_count = 3 + read_only_node_count = 0 + analytics_node_count = 0 + enable_disk_gb = true + enable_compute = false + priority = 7 + } + ] + }, { + num_shards = 2 + zone_name = "Zone B" + region_configs = [ + { + provider_name = "AWS" + region_name = "US_WEST_2" + instance_size = "M10" + electable_node_count = 2 + read_only_node_count = 1 + analytics_node_count = 1 + enable_disk_gb = false + enable_compute = true + priority = 7 + }, { + provider_name = "AWS" + region_name = "EU_WEST_1" + instance_size = "M10" + electable_node_count = 1 + read_only_node_count = 0 + analytics_node_count = 0 + enable_disk_gb = true + enable_compute = false + priority = 6 + } + ] + } + ] +} diff --git a/internal/convert/testdata/adv2v2/dynamic_replication_specs.out.tf b/internal/convert/testdata/adv2v2/dynamic_replication_specs.out.tf new file mode 100644 index 0000000..513273c --- /dev/null +++ b/internal/convert/testdata/adv2v2/dynamic_replication_specs.out.tf @@ -0,0 +1,153 @@ +resource "mongodbatlas_advanced_cluster" "dynamic_replication_specs" { + lifecycle { + precondition { + condition = !(var.auto_scaling_disk_gb_enabled && 
var.disk_size > 0) + error_message = "Must use either auto_scaling_disk_gb_enabled or disk_size, not both." + } + } + + project_id = var.project_id + name = var.cluster_name + cluster_type = "GEOSHARDED" + + + replication_specs = flatten([ + for spec in var.replication_specs : [ + for i in range(spec.num_shards) : { + zone_name = spec.zone_name + region_configs = [ + for region in spec.region_configs : { + priority = region.priority + provider_name = region.provider_name + region_name = region.region_name + electable_specs = { + instance_size = region.instance_size + node_count = region.electable_node_count + } + read_only_specs = { + instance_size = region.instance_size + node_count = region.read_only_node_count + } + } + ] + } + ] + ]) + tags = merge( + var.tags, + { + "Tag 2" = "Value 2" + } + ) + + # Updated by atlas-cli-plugin-terraform, please review the changes. +} + +resource "mongodbatlas_advanced_cluster" "all_specs" { + project_id = var.project_id + name = var.cluster_name + cluster_type = "GEOSHARDED" + + replication_specs = flatten([ + for spec in var.replication_specs : [ + for i in range(spec.num_shards) : { + zone_name = spec.zone_name + region_configs = [ + for region in spec.region_configs : { + priority = region.priority + provider_name = region.provider_name + region_name = region.region_name + electable_specs = { + instance_size = region.instance_size + node_count = region.electable_node_count + disk_size_gb = 123 + } + read_only_specs = { + instance_size = region.instance_size + node_count = region.read_only_node_count + disk_size_gb = 123 + } + analytics_specs = { + instance_size = region.instance_size + node_count = region.analytics_node_count + disk_size_gb = 123 + } + auto_scaling = { + disk_gb_enabled = region.enable_disk_gb + } + analytics_auto_scaling = { + compute_enabled = region.enable_compute + } + } + ] + } + ] + ]) + + # Updated by atlas-cli-plugin-terraform, please review the changes. 
+} + +# example of variable for demostration purposes, not used in the conversion +variable "replication_specs" { + description = "List of replication specifications in mongodbatlas_advanced_cluster format" + type = list(object({ + num_shards = number + zone_name = string + region_configs = list(object({ + provider_name = string + region_name = string + instance_size = string + electable_node_count = number + read_only_node_count = number + analytics_node_count = number + enable_disk_gb = bool + enable_compute = bool + priority = number + })) + })) + default = [ + { + num_shards = 1 + zone_name = "Zone A" + region_configs = [ + { + provider_name = "AWS" + region_name = "US_EAST_1" + instance_size = "M10" + electable_node_count = 3 + read_only_node_count = 0 + analytics_node_count = 0 + enable_disk_gb = true + enable_compute = false + priority = 7 + } + ] + }, { + num_shards = 2 + zone_name = "Zone B" + region_configs = [ + { + provider_name = "AWS" + region_name = "US_WEST_2" + instance_size = "M10" + electable_node_count = 2 + read_only_node_count = 1 + analytics_node_count = 1 + enable_disk_gb = false + enable_compute = true + priority = 7 + }, { + provider_name = "AWS" + region_name = "EU_WEST_1" + instance_size = "M10" + electable_node_count = 1 + read_only_node_count = 0 + analytics_node_count = 0 + enable_disk_gb = true + enable_compute = false + priority = 6 + } + ] + } + ] +} diff --git a/internal/convert/testdata/adv2v2/dynamic_replication_specs_different_var_name.in.tf b/internal/convert/testdata/adv2v2/dynamic_replication_specs_different_var_name.in.tf new file mode 100644 index 0000000..e282e6c --- /dev/null +++ b/internal/convert/testdata/adv2v2/dynamic_replication_specs_different_var_name.in.tf @@ -0,0 +1,48 @@ +resource "mongodbatlas_advanced_cluster" "different_var_names" { + project_id = var.project_id + name = var.cluster_name + cluster_type = var.cluster_type + dynamic "replication_specs" { + for_each = var.my_rep_specs + content { + num_shards = 
replication_specs.value.my_shards + zone_name = replication_specs.value.my_zone + + dynamic "region_configs" { + for_each = replication_specs.value.my_regions + content { + priority = region_configs.value.prio + provider_name = region_configs.value.provider_name + region_name = region_configs.value.my_region_name + electable_specs { + instance_size = region_configs.value.instance_size + node_count = region_configs.value.my_electable_node_count + } + } + } + } + } +} + +resource "mongodbatlas_advanced_cluster" "different_var_names_no_zone_name_no_num_shards" { + project_id = var.project_id + name = var.cluster_name + cluster_type = var.cluster_type + dynamic "replication_specs" { + for_each = var.my_rep_specs + content { + dynamic "region_configs" { + for_each = replication_specs.value.my_regions + content { + priority = region_configs.value.prio + provider_name = region_configs.value.provider_name + region_name = region_configs.value.my_region_name + electable_specs { + instance_size = region_configs.value.instance_size + node_count = region_configs.value.my_electable_node_count + } + } + } + } + } +} diff --git a/internal/convert/testdata/adv2v2/dynamic_replication_specs_different_var_name.out.tf b/internal/convert/testdata/adv2v2/dynamic_replication_specs_different_var_name.out.tf new file mode 100644 index 0000000..65db111 --- /dev/null +++ b/internal/convert/testdata/adv2v2/dynamic_replication_specs_different_var_name.out.tf @@ -0,0 +1,50 @@ +resource "mongodbatlas_advanced_cluster" "different_var_names" { + project_id = var.project_id + name = var.cluster_name + cluster_type = var.cluster_type + replication_specs = flatten([ + for spec in var.my_rep_specs : [ + for i in range(spec.my_shards) : { + zone_name = spec.my_zone + region_configs = [ + for region in spec.region_configs : { + priority = region.prio + provider_name = region.provider_name + region_name = region.my_region_name + electable_specs = { + instance_size = region.instance_size + node_count = 
region.my_electable_node_count + } + } + ] + } + ] + ]) + + # Updated by atlas-cli-plugin-terraform, please review the changes. +} + +resource "mongodbatlas_advanced_cluster" "different_var_names_no_zone_name_no_num_shards" { + project_id = var.project_id + name = var.cluster_name + cluster_type = var.cluster_type + replication_specs = flatten([ + for spec in var.my_rep_specs : [ + { + region_configs = [ + for region in spec.region_configs : { + priority = region.prio + provider_name = region.provider_name + region_name = region.my_region_name + electable_specs = { + instance_size = region.instance_size + node_count = region.my_electable_node_count + } + } + ] + } + ] + ]) + + # Updated by atlas-cli-plugin-terraform, please review the changes. +} diff --git a/internal/convert/testdata/adv2v2/dynamic_tags_labels.in.tf b/internal/convert/testdata/adv2v2/dynamic_tags_labels.in.tf new file mode 100644 index 0000000..dfbb4f5 --- /dev/null +++ b/internal/convert/testdata/adv2v2/dynamic_tags_labels.in.tf @@ -0,0 +1,164 @@ +resource "mongodbatlas_advanced_cluster" "simplified" { + project_id = var.project_id + name = "cluster" + cluster_type = "REPLICASET" + replication_specs { + region_configs { + priority = 7 + provider_name = "AWS" + region_name = "EU_WEST_1" + electable_specs { + instance_size = "M10" + node_count = 3 + } + } + } + dynamic "tags" { + for_each = local.tags + content { // simplified version where var can be used directly + key = tags.key + value = tags.value + } + } +} + +resource "mongodbatlas_advanced_cluster" "expression" { + project_id = var.project_id + name = "cluster" + cluster_type = "REPLICASET" + replication_specs { + region_configs { + priority = 7 + provider_name = "AWS" + region_name = "EU_WEST_1" + electable_specs { + instance_size = "M10" + node_count = 3 + } + } + } + dynamic "tags" { + for_each = local.tags + content { // using expressions + key = tags.key + value = replace(tags.value, "/", "_") + } + } +} + +resource 
"mongodbatlas_advanced_cluster" "simplified_individual" { + project_id = var.project_id + name = "cluster" + cluster_type = "REPLICASET" + replication_specs { + region_configs { + priority = 7 + provider_name = "AWS" + region_name = "EU_WEST_1" + electable_specs { + instance_size = "M10" + node_count = 3 + } + } + } + tags { // using individual tags apart from simplified version in dynamic tags + key = "tag1" + value = var.tag1val + } + dynamic "tags" { + for_each = var.tags + content { // simplified version where var can be used directly + key = tags.key + value = tags.value + } + } + tags { + key = "tag 2" + value = var.tag2val + } +} + +resource "mongodbatlas_advanced_cluster" "expression_individual" { + project_id = var.project_id + name = "cluster" + cluster_type = "REPLICASET" + replication_specs { + region_configs { + priority = 7 + provider_name = "AWS" + region_name = "EU_WEST_1" + electable_specs { + instance_size = "M10" + node_count = 3 + } + } + } + tags { // using individual tags apart from expressions in dynamic tags + key = "tag1" + value = var.tag1val + } + dynamic "tags" { + for_each = var.tags + content { // using expressions + key = tags.key + value = replace(tags.value, "/", "_") + } + } + tags { + key = "tag 2" + value = var.tag2val + } +} + +resource "mongodbatlas_advanced_cluster" "full_example" { + project_id = var.project_id + name = "cluster" + cluster_type = "REPLICASET" + replication_specs { + region_configs { + priority = 7 + provider_name = "AWS" + region_name = "EU_WEST_1" + electable_specs { + instance_size = "M10" + node_count = 3 + } + } + } + labels { + key = "label1" + value = "label1val" + } + labels { + key = "label2" + value = data.my_resource.my_data.value + } + dynamic "labels" { + for_each = local.tags + content { + key = labels.key + value = labels.value + } + } + tags { + key = "environment" + value = "dev" + } + tags { + key = var.tag_key # non-literal values are supported and enclosed in parentheses + value = 
var.tag_value + } + dynamic "tags" { + for_each = var.tags + content { + key = tags.key + value = replace(tags.value, "/", "_") + } + } + lifecycle { + precondition { + condition = local.use_new_replication_specs || !(var.auto_scaling_disk_gb_enabled && var.disk_size > 0) + error_message = "Must use either auto_scaling_disk_gb_enabled or disk_size, not both." + } + } +} diff --git a/internal/convert/testdata/adv2v2/dynamic_tags_labels.out.tf b/internal/convert/testdata/adv2v2/dynamic_tags_labels.out.tf new file mode 100644 index 0000000..cf16f57 --- /dev/null +++ b/internal/convert/testdata/adv2v2/dynamic_tags_labels.out.tf @@ -0,0 +1,156 @@ +resource "mongodbatlas_advanced_cluster" "simplified" { + project_id = var.project_id + name = "cluster" + cluster_type = "REPLICASET" + replication_specs = [ + { + region_configs = [ + { + priority = 7 + provider_name = "AWS" + region_name = "EU_WEST_1" + electable_specs = { + instance_size = "M10" + node_count = 3 + } + } + ] + } + ] + tags = local.tags + + # Updated by atlas-cli-plugin-terraform, please review the changes. +} + +resource "mongodbatlas_advanced_cluster" "expression" { + project_id = var.project_id + name = "cluster" + cluster_type = "REPLICASET" + replication_specs = [ + { + region_configs = [ + { + priority = 7 + provider_name = "AWS" + region_name = "EU_WEST_1" + electable_specs = { + instance_size = "M10" + node_count = 3 + } + } + ] + } + ] + tags = { + for key, value in local.tags : key => replace(value, "/", "_") + } + + # Updated by atlas-cli-plugin-terraform, please review the changes. 
+} + +resource "mongodbatlas_advanced_cluster" "simplified_individual" { + project_id = var.project_id + name = "cluster" + cluster_type = "REPLICASET" + replication_specs = [ + { + region_configs = [ + { + priority = 7 + provider_name = "AWS" + region_name = "EU_WEST_1" + electable_specs = { + instance_size = "M10" + node_count = 3 + } + } + ] + } + ] + tags = merge( + var.tags, + { + tag1 = var.tag1val + "tag 2" = var.tag2val + } + ) + + # Updated by atlas-cli-plugin-terraform, please review the changes. +} + +resource "mongodbatlas_advanced_cluster" "expression_individual" { + project_id = var.project_id + name = "cluster" + cluster_type = "REPLICASET" + replication_specs = [ + { + region_configs = [ + { + priority = 7 + provider_name = "AWS" + region_name = "EU_WEST_1" + electable_specs = { + instance_size = "M10" + node_count = 3 + } + } + ] + } + ] + tags = merge( + { + for key, value in var.tags : key => replace(value, "/", "_") + }, + { + tag1 = var.tag1val + "tag 2" = var.tag2val + } + ) + + # Updated by atlas-cli-plugin-terraform, please review the changes. +} + +resource "mongodbatlas_advanced_cluster" "full_example" { + project_id = var.project_id + name = "cluster" + cluster_type = "REPLICASET" + lifecycle { + precondition { + condition = local.use_new_replication_specs || !(var.auto_scaling_disk_gb_enabled && var.disk_size > 0) + error_message = "Must use either auto_scaling_disk_gb_enabled or disk_size, not both." + } + } + replication_specs = [ + { + region_configs = [ + { + priority = 7 + provider_name = "AWS" + region_name = "EU_WEST_1" + electable_specs = { + instance_size = "M10" + node_count = 3 + } + } + ] + } + ] + tags = merge( + { + for key, value in var.tags : key => replace(value, "/", "_") + }, + { + environment = "dev" + (var.tag_key) = var.tag_value + } + ) + labels = merge( + local.tags, + { + label1 = "label1val" + label2 = data.my_resource.my_data.value + } + ) + + # Updated by atlas-cli-plugin-terraform, please review the changes. 
+} diff --git a/internal/convert/testdata/adv2v2/dynamic_unsupported_tag.in.tf b/internal/convert/testdata/adv2v2/dynamic_unsupported_tag.in.tf new file mode 100644 index 0000000..85884a7 --- /dev/null +++ b/internal/convert/testdata/adv2v2/dynamic_unsupported_tag.in.tf @@ -0,0 +1,25 @@ +resource "mongodbatlas_advanced_cluster" "this" { + project_id = var.project_id + name = "cluster" + cluster_type = "REPLICASET" + + # dynamic blocks are only supported for tags, labels, replication_specs and region_configs + dynamic "advanced_configuration" { + for_each = var.advanced_configuration + content { + javascript_enabled = advanced_configuration.value.javascript_enabled + } + } + + replication_specs { + region_configs { + priority = 7 + provider_name = "AWS" + region_name = "EU_WEST_1" + electable_specs { + instance_size = "M10" + node_count = 3 + } + } + } +} diff --git a/internal/convert/testdata/adv2v2/errors.json b/internal/convert/testdata/adv2v2/errors.json index 5debe17..dae5a07 100644 --- a/internal/convert/testdata/adv2v2/errors.json +++ b/internal/convert/testdata/adv2v2/errors.json @@ -2,5 +2,5 @@ "configuration_file_error": "failed to parse Terraform config file", "replication_specs_missing_region_configs": "replication_specs must have at least one region_configs", "missing_replication_specs": "must have at least one replication_specs", - "num_shards_not_numerical": "setting num_shards: failed to evaluate number" + "dynamic_unsupported_tag": "dynamic blocks are not supported for advanced_configuration" } diff --git a/internal/convert/testdata/adv2v2/num_shards.in.tf b/internal/convert/testdata/adv2v2/num_shards.in.tf index 8677e2b..faa6efc 100644 --- a/internal/convert/testdata/adv2v2/num_shards.in.tf +++ b/internal/convert/testdata/adv2v2/num_shards.in.tf @@ -1,4 +1,4 @@ -resource "mongodbatlas_advanced_cluster" "geo" { +resource "mongodbatlas_advanced_cluster" "numerical_num_shards" { project_id = var.project_id name = "geo" cluster_type = "GEOSHARDED" @@ 
-39,7 +39,7 @@ resource "mongodbatlas_advanced_cluster" "geo" { } } -resource "mongodbatlas_advanced_cluster" "geo" { +resource "mongodbatlas_advanced_cluster" "numerical_num_shards_and_disk_size_gb" { project_id = var.project_id name = "geo" cluster_type = "GEOSHARDED" @@ -58,3 +58,104 @@ resource "mongodbatlas_advanced_cluster" "geo" { } } } + +resource "mongodbatlas_advanced_cluster" "variable_num_shards" { + project_id = var.project_id + name = "geo" + cluster_type = "GEOSHARDED" + replication_specs { + zone_name = "Zone 1" + num_shards = var.num_shards + region_configs { + provider_name = "AWS" + region_name = "US_EAST_1" + priority = 7 + electable_specs { + node_count = 3 + instance_size = "M10" + } + } + } +} + +resource "mongodbatlas_advanced_cluster" "multiple_variable_num_shards" { + project_id = var.project_id + name = "geo" + cluster_type = "GEOSHARDED" + replication_specs { + zone_name = "Zone 1" + num_shards = var.num_shards_rep1 + region_configs { + provider_name = "AWS" + region_name = "US_EAST_1" + priority = 7 + electable_specs { + node_count = 3 + instance_size = "M10" + } + } + } + replication_specs { + zone_name = "Zone 2" + num_shards = var.num_shards_rep2 + region_configs { + provider_name = "AWS" + region_name = "US_WEST_2" + priority = 7 + electable_specs { + node_count = 2 + instance_size = "M10" + } + } + region_configs { + provider_name = "AWS" + region_name = "EU_CENTRAL_1" + priority = 6 + electable_specs { + node_count = 1 + instance_size = "M10" + } + } + } +} + +resource "mongodbatlas_advanced_cluster" "mix_variable_numerical_num_shards" { + project_id = var.project_id + name = "geo" + cluster_type = "GEOSHARDED" + replication_specs { + zone_name = "Zone 1" + num_shards = 2 + region_configs { + provider_name = "AWS" + region_name = "US_EAST_1" + priority = 7 + electable_specs { + node_count = 3 + instance_size = "M10" + } + } + } + replication_specs { + zone_name = "Zone 2" + num_shards = var.num_shards_rep2 + region_configs { + 
provider_name = "AWS" + region_name = "US_WEST_2" + priority = 7 + electable_specs { + node_count = 2 + instance_size = "M10" + } + } + region_configs { + provider_name = "AWS" + region_name = "EU_CENTRAL_1" + priority = 6 + electable_specs { + node_count = 1 + instance_size = "M10" + } + } + } +} diff --git a/internal/convert/testdata/adv2v2/num_shards.out.tf b/internal/convert/testdata/adv2v2/num_shards.out.tf index 75c2375..c2cdb98 100644 --- a/internal/convert/testdata/adv2v2/num_shards.out.tf +++ b/internal/convert/testdata/adv2v2/num_shards.out.tf @@ -1,4 +1,4 @@ -resource "mongodbatlas_advanced_cluster" "geo" { +resource "mongodbatlas_advanced_cluster" "numerical_num_shards" { project_id = var.project_id name = "geo" cluster_type = "GEOSHARDED" @@ -105,7 +105,7 @@ resource "mongodbatlas_advanced_cluster" "geo" { # Updated by atlas-cli-plugin-terraform, please review the changes. } -resource "mongodbatlas_advanced_cluster" "geo" { +resource "mongodbatlas_advanced_cluster" "numerical_num_shards_and_disk_size_gb" { project_id = var.project_id name = "geo" cluster_type = "GEOSHARDED" @@ -144,3 +144,143 @@ resource "mongodbatlas_advanced_cluster" "geo" { # Updated by atlas-cli-plugin-terraform, please review the changes. } + +resource "mongodbatlas_advanced_cluster" "variable_num_shards" { + project_id = var.project_id + name = "geo" + cluster_type = "GEOSHARDED" + replication_specs = [ + for i in range(var.num_shards) : { + zone_name = "Zone 1" + region_configs = [ + { + provider_name = "AWS" + region_name = "US_EAST_1" + priority = 7 + electable_specs = { + node_count = 3 + instance_size = "M10" + } + } + ] + } + ] + + # Updated by atlas-cli-plugin-terraform, please review the changes. 
+} + +resource "mongodbatlas_advanced_cluster" "multiple_variable_num_shards" { + project_id = var.project_id + name = "geo" + cluster_type = "GEOSHARDED" + replication_specs = concat( + [ + for i in range(var.num_shards_rep1) : { + zone_name = "Zone 1" + region_configs = [ + { + provider_name = "AWS" + region_name = "US_EAST_1" + priority = 7 + electable_specs = { + node_count = 3 + instance_size = "M10" + } + } + ] + } + ], + [ + for i in range(var.num_shards_rep2) : { + zone_name = "Zone 2" + region_configs = [ + { + provider_name = "AWS" + region_name = "US_WEST_2" + priority = 7 + electable_specs = { + node_count = 2 + instance_size = "M10" + } + }, + { + provider_name = "AWS" + region_name = "EU_CENTRAL_1" + priority = 6 + electable_specs = { + node_count = 1 + instance_size = "M10" + } + } + ] + } + ] + ) + + # Updated by atlas-cli-plugin-terraform, please review the changes. +} + +resource "mongodbatlas_advanced_cluster" "mix_variable_numerical_num_shards" { + project_id = var.project_id + name = "geo" + cluster_type = "GEOSHARDED" + replication_specs = concat( + [ + { + zone_name = "Zone 1" + region_configs = [ + { + provider_name = "AWS" + region_name = "US_EAST_1" + priority = 7 + electable_specs = { + node_count = 3 + instance_size = "M10" + } + } + ] + }, + { + zone_name = "Zone 1" + region_configs = [ + { + provider_name = "AWS" + region_name = "US_EAST_1" + priority = 7 + electable_specs = { + node_count = 3 + instance_size = "M10" + } + } + ] + } + ], + [ + for i in range(var.num_shards_rep2) : { + zone_name = "Zone 2" + region_configs = [ + { + provider_name = "AWS" + region_name = "US_WEST_2" + priority = 7 + electable_specs = { + node_count = 2 + instance_size = "M10" + } + }, + { + provider_name = "AWS" + region_name = "EU_CENTRAL_1" + priority = 6 + electable_specs = { + node_count = 1 + instance_size = "M10" + } + } + ] + } + ] + ) + + # Updated by atlas-cli-plugin-terraform, please review the changes. 
+} diff --git a/internal/convert/testdata/adv2v2/num_shards_not_numerical.in.tf b/internal/convert/testdata/adv2v2/num_shards_not_numerical.in.tf deleted file mode 100644 index 396355f..0000000 --- a/internal/convert/testdata/adv2v2/num_shards_not_numerical.in.tf +++ /dev/null @@ -1,18 +0,0 @@ -resource "mongodbatlas_advanced_cluster" "geo" { - project_id = var.project_id - name = "geo" - cluster_type = "GEOSHARDED" - replication_specs { - zone_name = "Zone 1" - num_shards = var.num_shards # unresolved num_shards - region_configs { - provider_name = "AWS" - region_name = "US_EAST_1" - priority = 7 - electable_specs { - node_count = 3 - instance_size = "M10" - } - } - } -} diff --git a/internal/convert/testdata/clu2adv/autoscaling.in.tf b/internal/convert/testdata/clu2adv/autoscaling.in.tf index 2936c41..e43bcd6 100644 --- a/internal/convert/testdata/clu2adv/autoscaling.in.tf +++ b/internal/convert/testdata/clu2adv/autoscaling.in.tf @@ -2,7 +2,7 @@ resource "mongodbatlas_cluster" "autoscaling" { project_id = var.project_id name = var.cluster_name disk_size_gb = 100 - num_shards = 1 + num_shards = 1 cluster_type = "REPLICASET" replication_specs { diff --git a/internal/convert/testdata/clu2adv/autoscaling_missing_attribute.in.tf b/internal/convert/testdata/clu2adv/autoscaling_missing_attribute.in.tf index f8dd8d8..123731b 100644 --- a/internal/convert/testdata/clu2adv/autoscaling_missing_attribute.in.tf +++ b/internal/convert/testdata/clu2adv/autoscaling_missing_attribute.in.tf @@ -2,7 +2,7 @@ resource "mongodbatlas_cluster" "autoscaling" { project_id = var.project_id name = var.cluster_name disk_size_gb = 100 - num_shards = 1 + num_shards = 1 cluster_type = "REPLICASET" replication_specs { diff --git a/internal/convert/testdata/clu2adv/dynamic_unsupported_tag.in.tf b/internal/convert/testdata/clu2adv/dynamic_unsupported_tag.in.tf new file mode 100644 index 0000000..1b0c752 --- /dev/null +++ b/internal/convert/testdata/clu2adv/dynamic_unsupported_tag.in.tf @@ -0,0 
+1,16 @@ +resource "mongodbatlas_cluster" "this" { + project_id = var.project_id + name = var.cluster_name + cluster_type = var.cluster_type + mongo_db_major_version = var.mongo_db_major_version + provider_instance_size_name = var.instance_size + provider_name = var.provider_name + + # dynamic blocks are only supported for tags, labels, replication_specs and regions_config + dynamic "advanced_configuration" { + for_each = var.advanced_configuration + content { + javascript_enabled = advanced_configuration.value.javascript_enabled + } + } +} diff --git a/internal/convert/testdata/clu2adv/errors.json b/internal/convert/testdata/clu2adv/errors.json index 67fa980..266aa5e 100644 --- a/internal/convert/testdata/clu2adv/errors.json +++ b/internal/convert/testdata/clu2adv/errors.json @@ -5,5 +5,5 @@ "regions_config_missing_priority": "setting replication_specs: attribute priority not found", "replication_specs_missing_num_shards": "num_shards not found", "replication_specs_missing_regions_config": "setting replication_specs: regions_config not found", - "replication_specs_non_literal_num_shards": "setting num_shards: failed to evaluate number" + "dynamic_unsupported_tag": "dynamic blocks are not supported for advanced_configuration" } diff --git a/internal/convert/testdata/clu2adv/free_cluster_missing_attribute.in.tf b/internal/convert/testdata/clu2adv/free_cluster_missing_attribute.in.tf index 2c3f37b..2f17b21 100644 --- a/internal/convert/testdata/clu2adv/free_cluster_missing_attribute.in.tf +++ b/internal/convert/testdata/clu2adv/free_cluster_missing_attribute.in.tf @@ -4,9 +4,9 @@ resource "resource1" "res1" { resource "mongodbatlas_cluster" "free_cluster" { # comment in the resource # comment in own line in the beginning - count = local.use_free_cluster ? 1 : 0 - project_id = var.project_id # inline comment kept - name = var.cluster_name + count = local.use_free_cluster ? 
1 : 0 + project_id = var.project_id # inline comment kept + name = var.cluster_name # comment in own line in the middle is deleted provider_name = "TENANT" # inline comment for attribute moved is not kept provider_region_name = var.region diff --git a/internal/convert/testdata/clu2adv/multi_replication_specs.in.tf b/internal/convert/testdata/clu2adv/multi_replication_specs.in.tf index 0aa09b8..de838fa 100644 --- a/internal/convert/testdata/clu2adv/multi_replication_specs.in.tf +++ b/internal/convert/testdata/clu2adv/multi_replication_specs.in.tf @@ -1,4 +1,4 @@ -resource "mongodbatlas_cluster" "multirep" { +resource "mongodbatlas_cluster" "basic" { project_id = var.project_id name = "multirep" disk_size_gb = 80 @@ -27,7 +27,7 @@ resource "mongodbatlas_cluster" "multirep" { } } -resource "mongodbatlas_cluster" "geo" { +resource "mongodbatlas_cluster" "multiple_numerical_num_shards" { project_id = "1234" name = "geo" disk_size_gb = 80 @@ -57,3 +57,73 @@ resource "mongodbatlas_cluster" "geo" { } } } + +resource "mongodbatlas_cluster" "variable_num_shards" { + project_id = var.project_id + name = "multirep" + cluster_type = "GEOSHARDED" + provider_name = "AWS" + provider_instance_size_name = "M10" + replication_specs { + zone_name = "Zone 1" + num_shards = var.num_shards + regions_config { + region_name = "US_EAST_1" + electable_nodes = 3 + priority = 7 + } + } +} + +resource "mongodbatlas_cluster" "multiple_variable_num_shards" { + project_id = var.project_id + name = "multirep" + cluster_type = "GEOSHARDED" + provider_name = "AWS" + provider_instance_size_name = "M10" + replication_specs { + zone_name = "Zone 1" + num_shards = var.num_shards_rep1 + regions_config { + region_name = "US_EAST_1" + electable_nodes = 3 + priority = 7 + } + } + replication_specs { + zone_name = "Zone 2" + num_shards = var.num_shards_rep2 + regions_config { + region_name = "US_WEST_2" + electable_nodes = 3 + priority = 7 + } + } +} + +resource "mongodbatlas_cluster" 
"mix_variable_numerical_num_shards" { + project_id = var.project_id + name = "multirep" + cluster_type = "GEOSHARDED" + provider_name = "AWS" + provider_instance_size_name = "M10" + disk_size_gb = 80 + replication_specs { + zone_name = "Zone 1" + num_shards = 2 + regions_config { + region_name = "US_EAST_1" + electable_nodes = 3 + priority = 7 + } + } + replication_specs { + zone_name = "Zone 2" + num_shards = var.num_shards_rep2 + regions_config { + region_name = "US_WEST_2" + electable_nodes = 3 + priority = 7 + } + } +} diff --git a/internal/convert/testdata/clu2adv/multi_replication_specs.out.tf b/internal/convert/testdata/clu2adv/multi_replication_specs.out.tf index cbb3526..3544d03 100644 --- a/internal/convert/testdata/clu2adv/multi_replication_specs.out.tf +++ b/internal/convert/testdata/clu2adv/multi_replication_specs.out.tf @@ -1,4 +1,4 @@ -resource "mongodbatlas_advanced_cluster" "multirep" { +resource "mongodbatlas_advanced_cluster" "basic" { project_id = var.project_id name = "multirep" cluster_type = "GEOSHARDED" @@ -40,7 +40,7 @@ resource "mongodbatlas_advanced_cluster" "multirep" { # Please review the changes and confirm that references to this resource are updated. } -resource "mongodbatlas_advanced_cluster" "geo" { +resource "mongodbatlas_advanced_cluster" "multiple_numerical_num_shards" { project_id = "1234" name = "geo" cluster_type = "GEOSHARDED" @@ -126,3 +126,131 @@ resource "mongodbatlas_advanced_cluster" "geo" { # Generated by atlas-cli-plugin-terraform. # Please review the changes and confirm that references to this resource are updated. 
} + +resource "mongodbatlas_advanced_cluster" "variable_num_shards" { + project_id = var.project_id + name = "multirep" + cluster_type = "GEOSHARDED" + replication_specs = [ + for i in range(var.num_shards) : { + zone_name = "Zone 1" + region_configs = [ + { + provider_name = "AWS" + region_name = "US_EAST_1" + priority = 7 + electable_specs = { + node_count = 3 + instance_size = "M10" + } + } + ] + } + ] + + # Generated by atlas-cli-plugin-terraform. + # Please review the changes and confirm that references to this resource are updated. +} + +resource "mongodbatlas_advanced_cluster" "multiple_variable_num_shards" { + project_id = var.project_id + name = "multirep" + cluster_type = "GEOSHARDED" + replication_specs = concat( + [ + for i in range(var.num_shards_rep1) : { + zone_name = "Zone 1" + region_configs = [ + { + provider_name = "AWS" + region_name = "US_EAST_1" + priority = 7 + electable_specs = { + node_count = 3 + instance_size = "M10" + } + } + ] + } + ], + [ + for i in range(var.num_shards_rep2) : { + zone_name = "Zone 2" + region_configs = [ + { + provider_name = "AWS" + region_name = "US_WEST_2" + priority = 7 + electable_specs = { + node_count = 3 + instance_size = "M10" + } + } + ] + } + ] + ) + + # Generated by atlas-cli-plugin-terraform. + # Please review the changes and confirm that references to this resource are updated. 
+} + +resource "mongodbatlas_advanced_cluster" "mix_variable_numerical_num_shards" { + project_id = var.project_id + name = "multirep" + cluster_type = "GEOSHARDED" + replication_specs = concat( + [ + { + zone_name = "Zone 1" + region_configs = [ + { + provider_name = "AWS" + region_name = "US_EAST_1" + priority = 7 + electable_specs = { + node_count = 3 + instance_size = "M10" + disk_size_gb = 80 + } + } + ] + }, + { + zone_name = "Zone 1" + region_configs = [ + { + provider_name = "AWS" + region_name = "US_EAST_1" + priority = 7 + electable_specs = { + node_count = 3 + instance_size = "M10" + disk_size_gb = 80 + } + } + ] + } + ], + [ + for i in range(var.num_shards_rep2) : { + zone_name = "Zone 2" + region_configs = [ + { + provider_name = "AWS" + region_name = "US_WEST_2" + priority = 7 + electable_specs = { + node_count = 3 + instance_size = "M10" + disk_size_gb = 80 + } + } + ] + } + ] + ) + + # Generated by atlas-cli-plugin-terraform. + # Please review the changes and confirm that references to this resource are updated. 
+} diff --git a/internal/convert/testdata/clu2adv/replication_specs_missing_regions_config.in.tf b/internal/convert/testdata/clu2adv/replication_specs_missing_regions_config.in.tf index b62adf1..f1fb1b4 100644 --- a/internal/convert/testdata/clu2adv/replication_specs_missing_regions_config.in.tf +++ b/internal/convert/testdata/clu2adv/replication_specs_missing_regions_config.in.tf @@ -2,13 +2,13 @@ resource "mongodbatlas_cluster" "autoscaling" { project_id = var.project_id name = var.cluster_name disk_size_gb = 100 - num_shards = 1 + num_shards = 1 cluster_type = "REPLICASET" replication_specs { num_shards = 1 } - + //Provider Settings "block" provider_name = "AWS" provider_auto_scaling_compute_min_instance_size = "M10" diff --git a/internal/convert/testdata/clu2adv/replication_specs_non_literal_num_shards.in.tf b/internal/convert/testdata/clu2adv/replication_specs_non_literal_num_shards.in.tf deleted file mode 100644 index ba37e6d..0000000 --- a/internal/convert/testdata/clu2adv/replication_specs_non_literal_num_shards.in.tf +++ /dev/null @@ -1,19 +0,0 @@ -resource "mongodbatlas_cluster" "multirep" { - project_id = var.project_id - name = "multirep" - disk_size_gb = 80 - num_shards = 1 - cloud_backup = false - cluster_type = "GEOSHARDED" - provider_name = "AWS" - provider_instance_size_name = "M10" - replication_specs { - zone_name = "Zone 1" - num_shards = var.num_shards # unresolved num_shards - regions_config { - region_name = "US_EAST_1" - electable_nodes = 3 - priority = 7 - } - } -} diff --git a/internal/hcl/hcl.go b/internal/hcl/hcl.go index 8b71cfb..0338ce7 100644 --- a/internal/hcl/hcl.go +++ b/internal/hcl/hcl.go @@ -65,17 +65,17 @@ func GetAttrInt(attr *hclwrite.Attribute, errPrefix string) (int, error) { } // GetAttrString tries to get an attribute value as a string. 
-func GetAttrString(attr *hclwrite.Attribute, errPrefix string) (string, error) { +func GetAttrString(attr *hclwrite.Attribute) (string, error) { expr, diags := hclsyntax.ParseExpression(attr.Expr().BuildTokens(nil).Bytes(), "", hcl.InitialPos) if diags.HasErrors() { - return "", fmt.Errorf("%s: failed to parse string: %s", errPrefix, diags.Error()) + return "", fmt.Errorf("failed to parse string: %s", diags.Error()) } val, diags := expr.Value(nil) if diags.HasErrors() { - return "", fmt.Errorf("%s: failed to evaluate string: %s", errPrefix, diags.Error()) + return "", fmt.Errorf("failed to evaluate string: %s", diags.Error()) } if !val.Type().Equals(cty.String) { - return "", fmt.Errorf("%s: attribute is not a string", errPrefix) + return "", fmt.Errorf("attribute is not a string") } return val.AsString(), nil } @@ -106,11 +106,6 @@ func TokensFromExpr(expr string) hclwrite.Tokens { return hclwrite.Tokens{{Type: hclsyntax.TokenIdent, Bytes: []byte(expr)}} } -// TokensObjectFromExpr creates an object with an expression. -func TokensObjectFromExpr(expr string) hclwrite.Tokens { - return EncloseBraces(EncloseNewLines(TokensFromExpr(expr)), false) -} - // TokensFuncMerge creates the tokens for the HCL merge function. func TokensFuncMerge(tokens ...hclwrite.Tokens) hclwrite.Tokens { params := EncloseNewLines(joinTokens(tokens...)) ret := TokensFromExpr("merge") return append(ret, EncloseParens(params)...) } @@ -118,6 +113,16 @@ +// TokensFuncConcat creates the tokens for the HCL concat function. +func TokensFuncConcat(tokens ...hclwrite.Tokens) hclwrite.Tokens { + if len(tokens) == 1 { + return tokens[0] // no need to concat if there's only one element + } + params := EncloseNewLines(joinTokens(tokens...)) + ret := TokensFromExpr("concat") + return append(ret, EncloseParens(params)...) +} + // TokensFuncFlatten creates the tokens for the HCL flatten function.
func TokensFuncFlatten(tokens hclwrite.Tokens) hclwrite.Tokens { ret := TokensFromExpr("flatten")