diff --git a/internal/hcl/hcl.go b/internal/hcl/hcl.go index 3a14ae8..298b35c 100644 --- a/internal/hcl/hcl.go +++ b/internal/hcl/hcl.go @@ -10,25 +10,6 @@ import ( "github.com/zclconf/go-cty/cty" ) -const ( - resourceType = "resource" - cluster = "mongodbatlas_cluster" - advCluster = "mongodbatlas_advanced_cluster" - nameReplicationSpecs = "replication_specs" - nameRegionConfigs = "region_configs" - nameElectableSpecs = "electable_specs" - nameProviderRegionName = "provider_region_name" - nameRegionName = "region_name" - nameProviderName = "provider_name" - nameBackingProviderName = "backing_provider_name" - nameProviderInstanceSizeName = "provider_instance_size_name" - nameInstanceSize = "instance_size" - nameClusterType = "cluster_type" - namePriority = "priority" - - errFreeCluster = "free cluster (because no " + nameReplicationSpecs + ")" -) - // ClusterToAdvancedCluster transforms all mongodbatlas_cluster definitions in a // Terraform configuration file into mongodbatlas_advanced_cluster schema v2 definitions. // All other resources and data sources are left untouched. 
@@ -45,67 +26,195 @@ func ClusterToAdvancedCluster(config []byte) ([]byte, error) { if resource.Type() != resourceType || resourceName != cluster { continue } - resourceBody := resource.Body() + resourceb := resource.Body() labels[0] = advCluster resource.SetLabels(labels) - if isFreeTier(resourceBody) { - if err := fillFreeTier(resourceBody); err != nil { - return nil, err - } + if resourceb.FirstMatchingBlock(nRepSpecs, nil) != nil { + err = fillReplicationSpecs(resourceb) + } else { + err = fillFreeTier(resourceb) + } + if err != nil { + return nil, err } - resourceBody.AppendNewline() - appendComment(resourceBody, "Generated by atlas-cli-plugin-terraform.") - appendComment(resourceBody, "Please confirm that all references to this resource are updated.") + resourceb.AppendNewline() + appendComment(resourceb, "Generated by atlas-cli-plugin-terraform.") + appendComment(resourceb, "Please confirm that all references to this resource are updated.") } return parser.Bytes(), nil } -func isFreeTier(body *hclwrite.Body) bool { - return body.FirstMatchingBlock(nameReplicationSpecs, nil) == nil -} - -func fillFreeTier(body *hclwrite.Body) error { - const ( - valClusterType = "REPLICASET" - valPriority = 7 - ) - body.SetAttributeValue(nameClusterType, cty.StringVal(valClusterType)) - regionConfig := hclwrite.NewEmptyFile() - regionConfigBody := regionConfig.Body() - setAttrInt(regionConfigBody, "priority", valPriority) - if err := moveAttribute(nameProviderRegionName, nameRegionName, body, regionConfigBody, errFreeCluster); err != nil { +// fillFreeTier is the entry point to convert clusters in free tier +func fillFreeTier(resourceb *hclwrite.Body) error { + resourceb.SetAttributeValue(nClusterType, cty.StringVal(valClusterType)) + config := hclwrite.NewEmptyFile() + configb := config.Body() + setAttrInt(configb, "priority", valPriority) + if err := moveAttr(resourceb, configb, nRegionNameSrc, nRegionName, errFreeCluster); err != nil { return err } - if err := 
moveAttribute(nameProviderName, nameProviderName, body, regionConfigBody, errFreeCluster); err != nil { + if err := moveAttr(resourceb, configb, nProviderName, nProviderName, errFreeCluster); err != nil { return err } - if err := moveAttribute(nameBackingProviderName, nameBackingProviderName, body, regionConfigBody, errFreeCluster); err != nil { + if err := moveAttr(resourceb, configb, nBackingProviderName, nBackingProviderName, errFreeCluster); err != nil { return err } electableSpec := hclwrite.NewEmptyFile() - if err := moveAttribute(nameProviderInstanceSizeName, nameInstanceSize, body, electableSpec.Body(), errFreeCluster); err != nil { + if err := moveAttr(resourceb, electableSpec.Body(), nInstanceSizeSrc, nInstanceSize, errFreeCluster); err != nil { return err } - regionConfigBody.SetAttributeRaw(nameElectableSpecs, tokensObject(electableSpec)) + configb.SetAttributeRaw(nElectableSpecs, tokensObject(electableSpec)) - replicationSpec := hclwrite.NewEmptyFile() - replicationSpec.Body().SetAttributeRaw(nameRegionConfigs, tokensArrayObject(regionConfig)) - body.SetAttributeRaw(nameReplicationSpecs, tokensArrayObject(replicationSpec)) + repSpecs := hclwrite.NewEmptyFile() + repSpecs.Body().SetAttributeRaw(nConfig, tokensArrayObject(config)) + resourceb.SetAttributeRaw(nRepSpecs, tokensArrayObject(repSpecs)) return nil } -func moveAttribute(fromAttrName, toAttrName string, fromBody, toBody *hclwrite.Body, errPrefix string) error { - attr := fromBody.GetAttribute(fromAttrName) - if attr == nil { - return fmt.Errorf("%s: attribute %s not found", errPrefix, fromAttrName) +// fillReplicationSpecs is the entry point to convert clusters with replication_specs (all but free tier) +func fillReplicationSpecs(resourceb *hclwrite.Body) error { + root, errRoot := popRootAttrs(resourceb, errRepSpecs) + if errRoot != nil { + return errRoot } - fromBody.RemoveAttribute(fromAttrName) - toBody.SetAttributeRaw(toAttrName, attr.Expr().BuildTokens(nil)) + repSpecsSrc := 
resourceb.FirstMatchingBlock(nRepSpecs, nil) + configSrc := repSpecsSrc.Body().FirstMatchingBlock(nConfigSrc, nil) + if configSrc == nil { + return fmt.Errorf("%s: %s not found", errRepSpecs, nConfigSrc) + } + + resourceb.RemoveAttribute(nNumShards) // num_shards in root is not relevant, only in replication_specs + // ok to fail as cloud_backup is optional + _ = moveAttr(resourceb, resourceb, nCloudBackup, nBackupEnabled, errRepSpecs) + + config, errConfig := getRegionConfigs(configSrc, root) + if errConfig != nil { + return errConfig + } + repSpecs := hclwrite.NewEmptyFile() + repSpecs.Body().SetAttributeRaw(nConfig, config) + resourceb.SetAttributeRaw(nRepSpecs, tokensArrayObject(repSpecs)) + + resourceb.RemoveBlock(repSpecsSrc) return nil } +// popRootAttrs deletes the attributes common to all replication_specs/regions_config and returns them. +func popRootAttrs(body *hclwrite.Body, errPrefix string) (attrVals, error) { + var ( + reqNames = []string{ + nProviderName, + nInstanceSizeSrc, + } + optNames = []string{ + nDiskSizeGB, + nDiskGBEnabledSrc, + nComputeEnabledSrc, + nComputeMinInstanceSizeSrc, + nComputeMaxInstanceSizeSrc, + nComputeScaleDownEnabledSrc, + } + req = make(map[string]hclwrite.Tokens) + opt = make(map[string]hclwrite.Tokens) + ) + for _, name := range reqNames { + tokens, err := popAttr(body, name, errPrefix) + if err != nil { + return attrVals{}, err + } + req[name] = tokens + } + for _, name := range optNames { + tokens, _ := popAttr(body, name, errPrefix) + if tokens != nil { + opt[name] = tokens + } + } + return attrVals{req: req, opt: opt}, nil +} + +func getRegionConfigs(configSrc *hclwrite.Block, root attrVals) (hclwrite.Tokens, error) { + file := hclwrite.NewEmptyFile() + fileb := file.Body() + fileb.SetAttributeRaw(nProviderName, root.req[nProviderName]) + if err := moveAttr(configSrc.Body(), fileb, nRegionName, nRegionName, errRepSpecs); err != nil { + return nil, err + } + if err := moveAttr(configSrc.Body(), fileb, nPriority, 
nPriority, errRepSpecs); err != nil { + return nil, err + } + autoScaling := getAutoScalingOpt(root.opt) + if autoScaling != nil { + fileb.SetAttributeRaw(nAutoScaling, autoScaling) + } + electableSpecs, errElect := getElectableSpecs(configSrc, root) + if errElect != nil { + return nil, errElect + } + fileb.SetAttributeRaw(nElectableSpecs, electableSpecs) + return tokensArrayObject(file), nil +} + +func getElectableSpecs(configSrc *hclwrite.Block, root attrVals) (hclwrite.Tokens, error) { + file := hclwrite.NewEmptyFile() + fileb := file.Body() + if err := moveAttr(configSrc.Body(), fileb, nElectableNodes, nNodeCount, errRepSpecs); err != nil { + return nil, err + } + fileb.SetAttributeRaw(nInstanceSize, root.req[nInstanceSizeSrc]) + if root.opt[nDiskSizeGB] != nil { + fileb.SetAttributeRaw(nDiskSizeGB, root.opt[nDiskSizeGB]) + } + return tokensObject(file), nil +} + +func getAutoScalingOpt(opt map[string]hclwrite.Tokens) hclwrite.Tokens { + var ( + names = [][2]string{ // use slice instead of map to preserve order + {nDiskGBEnabledSrc, nDiskGBEnabled}, + {nComputeEnabledSrc, nComputeEnabled}, + {nComputeMinInstanceSizeSrc, nComputeMinInstanceSize}, + {nComputeMaxInstanceSizeSrc, nComputeMaxInstanceSize}, + {nComputeScaleDownEnabledSrc, nComputeScaleDownEnabled}, + } + file = hclwrite.NewEmptyFile() + found = false + ) + for _, tuple := range names { + src, dst := tuple[0], tuple[1] + if tokens := opt[src]; tokens != nil { + file.Body().SetAttributeRaw(dst, tokens) + found = true + } + } + if !found { + return nil + } + return tokensObject(file) +} + +// moveAttr deletes an attribute from fromBody and adds it to toBody. +func moveAttr(fromBody, toBody *hclwrite.Body, fromAttrName, toAttrName, errPrefix string) error { + tokens, err := popAttr(fromBody, fromAttrName, errPrefix) + if err == nil { + toBody.SetAttributeRaw(toAttrName, tokens) + } + return err +} + +// popAttr deletes an attribute and returns its value. 
+func popAttr(body *hclwrite.Body, attrName, errPrefix string) (hclwrite.Tokens, error) { + attr := body.GetAttribute(attrName) + if attr == nil { + return nil, fmt.Errorf("%s: attribute %s not found", errPrefix, attrName) + } + tokens := attr.Expr().BuildTokens(nil) + body.RemoveAttribute(attrName) + return tokens, nil +} + func setAttrInt(body *hclwrite.Body, attrName string, number int) { tokens := hclwrite.Tokens{ {Type: hclsyntax.TokenNumberLit, Bytes: []byte(strconv.Itoa(number))}, @@ -148,3 +257,50 @@ func getParser(config []byte) (*hclwrite.File, error) { } return parser, nil } + +type attrVals struct { + req map[string]hclwrite.Tokens + opt map[string]hclwrite.Tokens +} + +const ( + resourceType = "resource" + cluster = "mongodbatlas_cluster" + advCluster = "mongodbatlas_advanced_cluster" + + nRepSpecs = "replication_specs" + nConfig = "region_configs" + nConfigSrc = "regions_config" + nElectableSpecs = "electable_specs" + nAutoScaling = "auto_scaling" + nRegionNameSrc = "provider_region_name" + nRegionName = "region_name" + nProviderName = "provider_name" + nBackingProviderName = "backing_provider_name" + nInstanceSizeSrc = "provider_instance_size_name" + nInstanceSize = "instance_size" + nClusterType = "cluster_type" + nPriority = "priority" + nNumShards = "num_shards" + nBackupEnabled = "backup_enabled" + nCloudBackup = "cloud_backup" + nDiskSizeGB = "disk_size_gb" + nDiskGBEnabledSrc = "auto_scaling_disk_gb_enabled" + nComputeEnabledSrc = "auto_scaling_compute_enabled" + nComputeScaleDownEnabledSrc = "auto_scaling_compute_scale_down_enabled" + nComputeMinInstanceSizeSrc = "provider_auto_scaling_compute_min_instance_size" + nComputeMaxInstanceSizeSrc = "provider_auto_scaling_compute_max_instance_size" + nDiskGBEnabled = "disk_gb_enabled" + nComputeEnabled = "compute_enabled" + nComputeScaleDownEnabled = "compute_scale_down_enabled" + nComputeMinInstanceSize = "compute_min_instance_size" + nComputeMaxInstanceSize = "compute_max_instance_size" + 
nNodeCount = "node_count" + nElectableNodes = "electable_nodes" + + valClusterType = "REPLICASET" + valPriority = 7 + + errFreeCluster = "free cluster (because no " + nRepSpecs + ")" + errRepSpecs = "setting " + nRepSpecs +) diff --git a/internal/hcl/hcl_test.go b/internal/hcl/hcl_test.go index 5f6619f..c53056c 100644 --- a/internal/hcl/hcl_test.go +++ b/internal/hcl/hcl_test.go @@ -35,15 +35,17 @@ func TestClusterToAdvancedCluster(t *testing.T) { assert.NotEmpty(t, inputFiles) for _, inputFile := range inputFiles { testName := strings.TrimSuffix(filepath.Base(inputFile), inSuffix) - inConfig, err := afero.ReadFile(fs, inputFile) - require.NoError(t, err) - outConfig, err := hcl.ClusterToAdvancedCluster(inConfig) - if err == nil { - g.Assert(t, testName, outConfig) - } else { - errMsg, found := errMap[testName] - assert.True(t, found, "error not found for test %s", testName) - assert.Contains(t, err.Error(), errMsg) - } + t.Run(testName, func(t *testing.T) { + inConfig, err := afero.ReadFile(fs, inputFile) + require.NoError(t, err) + outConfig, err := hcl.ClusterToAdvancedCluster(inConfig) + if err == nil { + g.Assert(t, testName, outConfig) + } else { + errMsg, found := errMap[testName] + assert.True(t, found, "error not found for test %s", testName) + assert.Contains(t, err.Error(), errMsg) + } + }) } } diff --git a/internal/hcl/testdata/clu2adv/autoscaling.in.tf b/internal/hcl/testdata/clu2adv/autoscaling.in.tf new file mode 100644 index 0000000..2936c41 --- /dev/null +++ b/internal/hcl/testdata/clu2adv/autoscaling.in.tf @@ -0,0 +1,33 @@ +resource "mongodbatlas_cluster" "autoscaling" { + project_id = var.project_id + name = var.cluster_name + disk_size_gb = 100 + num_shards = 1 + cluster_type = "REPLICASET" + + replication_specs { + num_shards = 1 + regions_config { + region_name = "US_WEST_2" + electable_nodes = 3 + priority = 7 + read_only_nodes = 0 + } + } + cloud_backup = true + auto_scaling_disk_gb_enabled = true + auto_scaling_compute_enabled = false + 
auto_scaling_compute_scale_down_enabled = local.scale_down + + //Provider Settings "block" + provider_name = "AWS" + provider_auto_scaling_compute_min_instance_size = "M10" + provider_auto_scaling_compute_max_instance_size = "M40" + provider_instance_size_name = "M20" + + lifecycle { // To simulate if there a new instance size name to avoid scale cluster down to original value + # Note that provider_instance_size_name won't exist in advanced_cluster so it's an error to refer to it, + # but plugin doesn't help here. + ignore_changes = [provider_instance_size_name] + } +} diff --git a/internal/hcl/testdata/clu2adv/autoscaling.out.tf b/internal/hcl/testdata/clu2adv/autoscaling.out.tf new file mode 100644 index 0000000..7fe8cf4 --- /dev/null +++ b/internal/hcl/testdata/clu2adv/autoscaling.out.tf @@ -0,0 +1,36 @@ +resource "mongodbatlas_advanced_cluster" "autoscaling" { + project_id = var.project_id + name = var.cluster_name + cluster_type = "REPLICASET" + + + + lifecycle { // To simulate if there a new instance size name to avoid scale cluster down to original value + # Note that provider_instance_size_name won't exist in advanced_cluster so it's an error to refer to it, + # but plugin doesn't help here. + ignore_changes = [provider_instance_size_name] + } + backup_enabled = true + replication_specs = [{ + region_configs = [{ + provider_name = "AWS" + region_name = "US_WEST_2" + priority = 7 + auto_scaling = { + disk_gb_enabled = true + compute_enabled = false + compute_min_instance_size = "M10" + compute_max_instance_size = "M40" + compute_scale_down_enabled = local.scale_down + } + electable_specs = { + node_count = 3 + instance_size = "M20" + disk_size_gb = 100 + } + }] + }] + + # Generated by atlas-cli-plugin-terraform. + # Please confirm that all references to this resource are updated. 
+} diff --git a/internal/hcl/testdata/clu2adv/autoscaling_missing_attribute.in.tf b/internal/hcl/testdata/clu2adv/autoscaling_missing_attribute.in.tf new file mode 100644 index 0000000..f8dd8d8 --- /dev/null +++ b/internal/hcl/testdata/clu2adv/autoscaling_missing_attribute.in.tf @@ -0,0 +1,32 @@ +resource "mongodbatlas_cluster" "autoscaling" { + project_id = var.project_id + name = var.cluster_name + disk_size_gb = 100 + num_shards = 1 + cluster_type = "REPLICASET" + + replication_specs { + num_shards = 1 + regions_config { + region_name = "US_WEST_2" + electable_nodes = 3 + priority = 7 + read_only_nodes = 0 + } + } + cloud_backup = true + auto_scaling_disk_gb_enabled = true + auto_scaling_compute_enabled = false + auto_scaling_compute_scale_down_enabled = local.scale_down + + //Provider Settings "block" + provider_name = "AWS" + provider_auto_scaling_compute_min_instance_size = "M10" + provider_auto_scaling_compute_max_instance_size = "M40" + + lifecycle { // To simulate if there a new instance size name to avoid scale cluster down to original value + # Note that provider_instance_size_name won't exist in advanced_cluster so it's an error to refer to it, + # but plugin doesn't help here. + ignore_changes = [provider_instance_size_name] + } +} diff --git a/internal/hcl/testdata/clu2adv/errors.json b/internal/hcl/testdata/clu2adv/errors.json index 99d16e1..cb08b6f 100644 --- a/internal/hcl/testdata/clu2adv/errors.json +++ b/internal/hcl/testdata/clu2adv/errors.json @@ -1,4 +1,5 @@ { "configuration_file_error": "failed to parse Terraform config file", - "free_cluster_missing_attribute": "free cluster (because no replication_specs): attribute backing_provider_name not found" + "free_cluster_missing_attribute": "free cluster (because no replication_specs): attribute backing_provider_name not found", + "autoscaling_missing_attribute": "setting replication_specs: attribute provider_instance_size_name not found" }