diff --git a/README.md b/README.md
index 87ecd0a..ab00efe 100644
--- a/README.md
+++ b/README.md
@@ -15,9 +15,10 @@ Install the plugin by running:
 atlas plugin install github.com/mongodb-labs/atlas-cli-plugin-terraform
 ```
 
-## Usage
+## Convert cluster to advanced_cluster v2
+
+### Usage
-### Convert cluster to advanced_cluster v2
 If you want to convert a Terraform configuration from `mongodbatlas_cluster` to `mongodbatlas_advanced_cluster` schema v2, use the following command:
 ```bash
 atlas terraform clusterToAdvancedCluster --file in.tf --output out.tf
@@ -30,6 +31,12 @@ atlas tf clu2adv -f in.tf -o out.tf
 ```
 If you want to overwrite the output file if it exists, or even use the same output file as the input file, use the `--overwriteOutput true` or the `-w` flag.
 
+### Limitations
+
+- The plugin doesn't support `regions_config` blocks without `electable_nodes`, as `priority` can be ambiguous for regions that only have `analytics_nodes` and/or `read_only_nodes`.
+- `priority` is required in `regions_config` and must be a number between 1 and 7 that can be resolved statically, e.g. `var.priority` is not supported. This allows the plugin to reorder the region configs by descending priority, as `mongodbatlas_advanced_cluster` expects.
+- `dynamic` blocks to generate `replication_specs`, `regions_config`, etc. are not supported.
+
 ## Contributing
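A minimal sketch (not part of the patch) of why unresolved priorities are rejected, using the new `hcl.GetAttrInt` helper introduced further down. It assumes it is compiled from within this module, since `internal/hcl` is not importable from outside:

```go
package main

import (
	"fmt"

	hclv2 "github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/hclwrite"

	"github.com/mongodb-labs/atlas-cli-plugin-terraform/internal/hcl"
)

func main() {
	src := []byte("priority = 7\nunresolved = var.priority\n")
	f, diags := hclwrite.ParseConfig(src, "example.tf", hclv2.InitialPos)
	if diags.HasErrors() {
		panic(diags.Error())
	}
	body := f.Body()

	// A literal resolves statically: prints "7 <nil>".
	n, err := hcl.GetAttrInt(body.GetAttribute("priority"), "setting priority")
	fmt.Println(n, err)

	// A variable reference has no value without an evaluation context, so the
	// conversion is rejected: prints "setting priority: failed to evaluate number: ...".
	_, err = hcl.GetAttrInt(body.GetAttribute("unresolved"), "setting priority")
	fmt.Println(err)
}
```

The plugin needs the concrete value both to validate the 1 to 7 range and to sort the region configs, which is why `priority` must be a literal in the input configuration.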
diff --git a/go.mod b/go.mod
index 60a3b36..9fe1a46 100644
--- a/go.mod
+++ b/go.mod
@@ -1,6 +1,6 @@
 module github.com/mongodb-labs/atlas-cli-plugin-terraform
 
-go 1.23.4
+go 1.23.6
 
 require (
 	github.com/hashicorp/hcl/v2 v2.23.0
diff --git a/internal/convert/const_names.go b/internal/convert/const_names.go
index 71cd66a..4117049 100644
--- a/internal/convert/const_names.go
+++ b/internal/convert/const_names.go
@@ -6,6 +6,8 @@ const (
 	nConfigSrc      = "regions_config"
 	nElectableSpecs = "electable_specs"
 	nAutoScaling    = "auto_scaling"
+	nReadOnlySpecs  = "read_only_specs"
+	nAnalyticsSpecs = "analytics_specs"
 	nRegionNameSrc  = "provider_region_name"
 	nRegionName     = "region_name"
 	nProviderName   = "provider_name"
@@ -23,11 +25,17 @@ const (
 	nComputeScaleDownEnabledSrc = "auto_scaling_compute_scale_down_enabled"
 	nComputeMinInstanceSizeSrc  = "provider_auto_scaling_compute_min_instance_size"
 	nComputeMaxInstanceSizeSrc  = "provider_auto_scaling_compute_max_instance_size"
+	nEBSVolumeTypeSrc           = "provider_volume_type"
+	nDiskIOPSSrc                = "provider_disk_iops"
 	nDiskGBEnabled              = "disk_gb_enabled"
 	nComputeEnabled             = "compute_enabled"
 	nComputeScaleDownEnabled    = "compute_scale_down_enabled"
 	nComputeMinInstanceSize     = "compute_min_instance_size"
 	nComputeMaxInstanceSize     = "compute_max_instance_size"
+	nEBSVolumeType              = "ebs_volume_type"
+	nDiskIOPS                   = "disk_iops"
 	nNodeCount                  = "node_count"
 	nElectableNodes             = "electable_nodes"
+	nReadOnlyNodes              = "read_only_nodes"
+	nAnalyticsNodes             = "analytics_nodes"
 )
diff --git a/internal/convert/convert.go b/internal/convert/convert.go
index 334dea5..4987b53 100644
--- a/internal/convert/convert.go
+++ b/internal/convert/convert.go
@@ -1,7 +1,9 @@
 package convert
 
 import (
+	"errors"
 	"fmt"
+	"sort"
 
 	"github.com/hashicorp/hcl/v2/hclwrite"
 	"github.com/mongodb-labs/atlas-cli-plugin-terraform/internal/hcl"
@@ -13,9 +15,12 @@ const (
 	cluster        = "mongodbatlas_cluster"
 	advCluster     = "mongodbatlas_advanced_cluster"
 	valClusterType = "REPLICASET"
-	valPriority    = 7
+	valMaxPriority = 7
+	valMinPriority = 1
 	errFreeCluster = "free cluster (because no " + nRepSpecs + ")"
 	errRepSpecs    = "setting " + nRepSpecs
+	errConfigs     = "setting " + nConfig
+	errPriority    = "setting " + nPriority
 )
 
 type attrVals struct {
@@ -40,6 +45,9 @@ func ClusterToAdvancedCluster(config []byte) ([]byte, error) {
 			continue
 		}
 		resourceb := resource.Body()
+		if errDyn := checkDynamicBlock(resourceb); errDyn != nil {
+			return nil, errDyn
+		}
 		labels[0] = advCluster
 		resource.SetLabels(labels)
 
@@ -64,7 +72,7 @@ func fillFreeTier(resourceb *hclwrite.Body) error {
 	resourceb.SetAttributeValue(nClusterType, cty.StringVal(valClusterType))
 	config := hclwrite.NewEmptyFile()
 	configb := config.Body()
-	hcl.SetAttrInt(configb, "priority", valPriority)
+	hcl.SetAttrInt(configb, nPriority, valMaxPriority)
 	if err := hcl.MoveAttr(resourceb, configb, nRegionNameSrc, nRegionName, errFreeCluster); err != nil {
 		return err
 	}
@@ -81,71 +89,113 @@ func fillFreeTier(resourceb *hclwrite.Body) error {
 	configb.SetAttributeRaw(nElectableSpecs, hcl.TokensObject(electableSpec))
 
 	repSpecs := hclwrite.NewEmptyFile()
-	repSpecs.Body().SetAttributeRaw(nConfig, hcl.TokensArrayObject(config))
-	resourceb.SetAttributeRaw(nRepSpecs, hcl.TokensArrayObject(repSpecs))
+	repSpecs.Body().SetAttributeRaw(nConfig, hcl.TokensArraySingle(config))
+	resourceb.SetAttributeRaw(nRepSpecs, hcl.TokensArraySingle(repSpecs))
 	return nil
 }
 
 // fillReplicationSpecs is the entry point to convert clusters with replications_specs (all but free tier)
 func fillReplicationSpecs(resourceb *hclwrite.Body) error {
-	root, errRoot := popRootAttrs(resourceb, errRepSpecs)
+	root, errRoot := popRootAttrs(resourceb)
 	if errRoot != nil {
 		return errRoot
 	}
-	repSpecsSrc := resourceb.FirstMatchingBlock(nRepSpecs, nil)
-	configSrc := repSpecsSrc.Body().FirstMatchingBlock(nConfigSrc, nil)
-	if configSrc == nil {
-		return fmt.Errorf("%s: %s not found", errRepSpecs, nConfigSrc)
-	}
-
 	resourceb.RemoveAttribute(nNumShards) // num_shards in root is not relevant, only in replication_specs
 	// ok to fail as cloud_backup is optional
 	_ = hcl.MoveAttr(resourceb, resourceb, nCloudBackup, nBackupEnabled, errRepSpecs)
-	config, errConfig := getRegionConfigs(configSrc, root)
-	if errConfig != nil {
-		return errConfig
+	// at least one replication_specs block exists here, otherwise this would be a free tier cluster
+	repSpecsSrc := resourceb.FirstMatchingBlock(nRepSpecs, nil)
+	if err := checkDynamicBlock(repSpecsSrc.Body()); err != nil {
+		return err
+	}
+	configs, errConfigs := getRegionConfigs(repSpecsSrc, root)
+	if errConfigs != nil {
+		return errConfigs
 	}
 	repSpecs := hclwrite.NewEmptyFile()
-	repSpecs.Body().SetAttributeRaw(nConfig, config)
-	resourceb.SetAttributeRaw(nRepSpecs, hcl.TokensArrayObject(repSpecs))
+	repSpecs.Body().SetAttributeRaw(nConfig, configs)
+	resourceb.SetAttributeRaw(nRepSpecs, hcl.TokensArraySingle(repSpecs))
 	resourceb.RemoveBlock(repSpecsSrc)
 	return nil
 }
 
-func getRegionConfigs(configSrc *hclwrite.Block, root attrVals) (hclwrite.Tokens, error) {
+func getRegionConfigs(repSpecsSrc *hclwrite.Block, root attrVals) (hclwrite.Tokens, error) {
+	var configs []*hclwrite.File
+	for {
+		configSrc := repSpecsSrc.Body().FirstMatchingBlock(nConfigSrc, nil)
+		if configSrc == nil {
+			break
+		}
+		config, err := getRegionConfig(configSrc, root)
+		if err != nil {
+			return nil, err
+		}
+		configs = append(configs, config)
+		repSpecsSrc.Body().RemoveBlock(configSrc)
+	}
+	if len(configs) == 0 {
+		return nil, fmt.Errorf("%s: %s not found", errRepSpecs, nConfigSrc)
+	}
+	sort.Slice(configs, func(i, j int) bool { // descending priority, values already validated in setPriority
+		pi, _ := hcl.GetAttrInt(configs[i].Body().GetAttribute(nPriority), errPriority)
+		pj, _ := hcl.GetAttrInt(configs[j].Body().GetAttribute(nPriority), errPriority)
+		return pi > pj
+	})
+	return hcl.TokensArray(configs), nil
+}
+
+func getRegionConfig(configSrc *hclwrite.Block, root attrVals) (*hclwrite.File, error) {
 	file := hclwrite.NewEmptyFile()
 	fileb := file.Body()
 	fileb.SetAttributeRaw(nProviderName, root.req[nProviderName])
 	if err := hcl.MoveAttr(configSrc.Body(), fileb, nRegionName, nRegionName, errRepSpecs); err != nil {
 		return nil, err
 	}
-	if err := hcl.MoveAttr(configSrc.Body(), fileb, nPriority, nPriority, errRepSpecs); err != nil {
+	if err := setPriority(fileb, configSrc.Body().GetAttribute(nPriority)); err != nil {
 		return nil, err
 	}
-	autoScaling := getAutoScalingOpt(root.opt)
-	if autoScaling != nil {
-		fileb.SetAttributeRaw(nAutoScaling, autoScaling)
-	}
-	electableSpecs, errElect := getElectableSpecs(configSrc, root)
-	if errElect != nil {
-		return nil, errElect
+	electableSpecs, errElec := getSpecs(nElectableNodes, configSrc, root)
+	if errElec != nil {
+		return nil, errElec
 	}
 	fileb.SetAttributeRaw(nElectableSpecs, electableSpecs)
+	if readOnly, _ := getSpecs(nReadOnlyNodes, configSrc, root); readOnly != nil { // ok to fail as read_only_specs is optional
+		fileb.SetAttributeRaw(nReadOnlySpecs, readOnly)
+	}
+	if analytics, _ := getSpecs(nAnalyticsNodes, configSrc, root); analytics != nil { // ok to fail as analytics_specs is optional
+		fileb.SetAttributeRaw(nAnalyticsSpecs, analytics)
+	}
+	if autoScaling := getAutoScalingOpt(root.opt); autoScaling != nil {
+		fileb.SetAttributeRaw(nAutoScaling, autoScaling)
+	}
+	return file, nil
 }
 
-	return hcl.TokensArrayObject(file), nil
-}
-
-func getElectableSpecs(configSrc *hclwrite.Block, root attrVals) (hclwrite.Tokens, error) {
-	file := hclwrite.NewEmptyFile()
-	fileb := file.Body()
-	if err := hcl.MoveAttr(configSrc.Body(), fileb, nElectableNodes, nNodeCount, errRepSpecs); err != nil {
-		return nil, err
+func getSpecs(countName string, configSrc *hclwrite.Block, root attrVals) (hclwrite.Tokens, error) {
+	var (
+		file  = hclwrite.NewEmptyFile()
+		fileb = file.Body()
+		count = configSrc.Body().GetAttribute(countName)
+	)
+	if count == nil {
+		return nil, fmt.Errorf("%s: attribute %s not found", errRepSpecs, countName)
+	}
+	if countVal, errVal := hcl.GetAttrInt(count, errRepSpecs); countVal == 0 && errVal == nil {
+		return nil, fmt.Errorf("%s: attribute %s is 0", errRepSpecs, countName)
 	}
+	fileb.SetAttributeRaw(nNodeCount, count.Expr().BuildTokens(nil))
 	fileb.SetAttributeRaw(nInstanceSize, root.req[nInstanceSizeSrc])
 	if root.opt[nDiskSizeGB] != nil {
 		fileb.SetAttributeRaw(nDiskSizeGB, root.opt[nDiskSizeGB])
 	}
+	if root.opt[nEBSVolumeTypeSrc] != nil {
+		fileb.SetAttributeRaw(nEBSVolumeType, root.opt[nEBSVolumeTypeSrc])
+	}
+	if root.opt[nDiskIOPSSrc] != nil {
+		fileb.SetAttributeRaw(nDiskIOPS, root.opt[nDiskIOPSSrc])
+	}
 	return hcl.TokensObject(file), nil
 }
 
@@ -174,33 +224,62 @@ func getAutoScalingOpt(opt map[string]hclwrite.Tokens) hclwrite.Tokens {
 	return hcl.TokensObject(file)
 }
 
+func checkDynamicBlock(body *hclwrite.Body) error {
+	for _, block := range body.Blocks() {
+		if block.Type() == "dynamic" {
+			return errors.New("dynamic blocks are not supported")
+		}
+	}
+	return nil
+}
+
+func setPriority(body *hclwrite.Body, priority *hclwrite.Attribute) error {
+	if priority == nil {
+		return fmt.Errorf("%s: %s not found", errRepSpecs, nPriority)
+	}
+	valPriority, err := hcl.GetAttrInt(priority, errPriority)
+	if err != nil {
+		return err
+	}
+	if valPriority < valMinPriority || valPriority > valMaxPriority {
+		return fmt.Errorf("%s: %s is %d but must be between %d and %d", errPriority, nPriority, valPriority, valMinPriority, valMaxPriority)
+	}
+	hcl.SetAttrInt(body, nPriority, valPriority)
+	return nil
+}
+
 // 
popRootAttrs deletes the attributes common to all replication_specs/regions_config and returns them. -func popRootAttrs(body *hclwrite.Body, errPrefix string) (attrVals, error) { +func popRootAttrs(body *hclwrite.Body) (attrVals, error) { var ( reqNames = []string{ nProviderName, nInstanceSizeSrc, } optNames = []string{ + nElectableNodes, + nReadOnlyNodes, + nAnalyticsNodes, nDiskSizeGB, nDiskGBEnabledSrc, nComputeEnabledSrc, nComputeMinInstanceSizeSrc, nComputeMaxInstanceSizeSrc, nComputeScaleDownEnabledSrc, + nEBSVolumeTypeSrc, + nDiskIOPSSrc, } req = make(map[string]hclwrite.Tokens) opt = make(map[string]hclwrite.Tokens) ) for _, name := range reqNames { - tokens, err := hcl.PopAttr(body, name, errPrefix) + tokens, err := hcl.PopAttr(body, name, errRepSpecs) if err != nil { return attrVals{}, err } req[name] = tokens } for _, name := range optNames { - tokens, _ := hcl.PopAttr(body, name, errPrefix) + tokens, _ := hcl.PopAttr(body, name, errRepSpecs) if tokens != nil { opt[name] = tokens } diff --git a/internal/convert/convert_test.go b/internal/convert/convert_test.go index 098814a..65056fc 100644 --- a/internal/convert/convert_test.go +++ b/internal/convert/convert_test.go @@ -43,7 +43,7 @@ func TestClusterToAdvancedCluster(t *testing.T) { g.Assert(t, testName, outConfig) } else { errMsg, found := errMap[testName] - assert.True(t, found, "error not found for test %s", testName) + assert.True(t, found, "error not found in file %s for test %s, errMsg: %v", errFilename, testName, err) assert.Contains(t, err.Error(), errMsg) } }) diff --git a/internal/convert/testdata/clu2adv/analytics_read_only_all_params.in.tf b/internal/convert/testdata/clu2adv/analytics_read_only_all_params.in.tf new file mode 100644 index 0000000..101c90e --- /dev/null +++ b/internal/convert/testdata/clu2adv/analytics_read_only_all_params.in.tf @@ -0,0 +1,20 @@ +resource "mongodbatlas_cluster" "ar" { + project_id = var.project_id + name = "ar" + cluster_type = "REPLICASET" + provider_name = "AWS" + provider_instance_size_name = "M10" + disk_size_gb = 90 + provider_volume_type = "PROVISIONED" + provider_disk_iops = 100 + replication_specs { + num_shards = 1 + regions_config { + region_name = "US_EAST_1" + priority = 7 + electable_nodes = 3 + analytics_nodes = 2 + read_only_nodes = 1 + } + } +} diff --git a/internal/convert/testdata/clu2adv/analytics_read_only_all_params.out.tf b/internal/convert/testdata/clu2adv/analytics_read_only_all_params.out.tf new file mode 100644 index 0000000..7b66960 --- /dev/null +++ b/internal/convert/testdata/clu2adv/analytics_read_only_all_params.out.tf @@ -0,0 +1,36 @@ +resource "mongodbatlas_advanced_cluster" "ar" { + project_id = var.project_id + name = "ar" + cluster_type = "REPLICASET" + replication_specs = [{ + region_configs = [{ + provider_name = "AWS" + region_name = "US_EAST_1" + priority = 7 + electable_specs = { + node_count = 3 + instance_size = "M10" + disk_size_gb = 90 + ebs_volume_type = "PROVISIONED" + disk_iops = 100 + } + read_only_specs = { + node_count = 1 + instance_size = "M10" + disk_size_gb = 90 + ebs_volume_type = "PROVISIONED" + disk_iops = 100 + } + analytics_specs = { + node_count = 2 + instance_size = "M10" + disk_size_gb = 90 + ebs_volume_type = "PROVISIONED" + disk_iops = 100 + } + }] + }] + + # Generated by atlas-cli-plugin-terraform. + # Please confirm that all references to this resource are updated. 
+} diff --git a/internal/convert/testdata/clu2adv/analytics_read_only_min_params.in.tf b/internal/convert/testdata/clu2adv/analytics_read_only_min_params.in.tf new file mode 100644 index 0000000..8aae1e1 --- /dev/null +++ b/internal/convert/testdata/clu2adv/analytics_read_only_min_params.in.tf @@ -0,0 +1,17 @@ +resource "mongodbatlas_cluster" "ar" { + project_id = var.project_id + name = "ar" + cluster_type = "REPLICASET" + provider_name = "AWS" + provider_instance_size_name = "M10" + replication_specs { + num_shards = 1 + regions_config { + region_name = "US_EAST_1" + priority = 7 + electable_nodes = 3 + analytics_nodes = 2 + read_only_nodes = 1 + } + } +} diff --git a/internal/convert/testdata/clu2adv/analytics_read_only_min_params.out.tf b/internal/convert/testdata/clu2adv/analytics_read_only_min_params.out.tf new file mode 100644 index 0000000..0889648 --- /dev/null +++ b/internal/convert/testdata/clu2adv/analytics_read_only_min_params.out.tf @@ -0,0 +1,27 @@ +resource "mongodbatlas_advanced_cluster" "ar" { + project_id = var.project_id + name = "ar" + cluster_type = "REPLICASET" + replication_specs = [{ + region_configs = [{ + provider_name = "AWS" + region_name = "US_EAST_1" + priority = 7 + electable_specs = { + node_count = 3 + instance_size = "M10" + } + read_only_specs = { + node_count = 1 + instance_size = "M10" + } + analytics_specs = { + node_count = 2 + instance_size = "M10" + } + }] + }] + + # Generated by atlas-cli-plugin-terraform. + # Please confirm that all references to this resource are updated. +} diff --git a/internal/convert/testdata/clu2adv/autoscaling.out.tf b/internal/convert/testdata/clu2adv/autoscaling.out.tf index 7fe8cf4..6b0ef6d 100644 --- a/internal/convert/testdata/clu2adv/autoscaling.out.tf +++ b/internal/convert/testdata/clu2adv/autoscaling.out.tf @@ -16,6 +16,11 @@ resource "mongodbatlas_advanced_cluster" "autoscaling" { provider_name = "AWS" region_name = "US_WEST_2" priority = 7 + electable_specs = { + node_count = 3 + instance_size = "M20" + disk_size_gb = 100 + } auto_scaling = { disk_gb_enabled = true compute_enabled = false @@ -23,11 +28,6 @@ resource "mongodbatlas_advanced_cluster" "autoscaling" { compute_max_instance_size = "M40" compute_scale_down_enabled = local.scale_down } - electable_specs = { - node_count = 3 - instance_size = "M20" - disk_size_gb = 100 - } }] }] diff --git a/internal/convert/testdata/clu2adv/errors.json b/internal/convert/testdata/clu2adv/errors.json index cb08b6f..359b7b0 100644 --- a/internal/convert/testdata/clu2adv/errors.json +++ b/internal/convert/testdata/clu2adv/errors.json @@ -1,5 +1,12 @@ { "configuration_file_error": "failed to parse Terraform config file", "free_cluster_missing_attribute": "free cluster (because no replication_specs): attribute backing_provider_name not found", - "autoscaling_missing_attribute": "setting replication_specs: attribute provider_instance_size_name not found" + "autoscaling_missing_attribute": "setting replication_specs: attribute provider_instance_size_name not found", + "replication_specs_missing_regions_config": "setting replication_specs: regions_config not found", + "regions_config_missing_electable_nodes": "setting replication_specs: attribute electable_nodes not found", + "regions_config_missing_priority": "setting replication_specs: priority not found", + "regions_config_out_of_range_priority": "setting priority: priority is 0 but must be between 1 and 7", + "regions_config_unresolved_priority": "setting priority: failed to evaluate number", + 
"replication_specs_unsupported_dynamic": "dynamic blocks are not supported", + "regions_config_unsupported_dynamic": "dynamic blocks are not supported" } diff --git a/internal/convert/testdata/clu2adv/multi_region.in.tf b/internal/convert/testdata/clu2adv/multi_region.in.tf new file mode 100644 index 0000000..7898da6 --- /dev/null +++ b/internal/convert/testdata/clu2adv/multi_region.in.tf @@ -0,0 +1,34 @@ +resource "mongodbatlas_cluster" "multi_region" { + project_id = "1234" + name = "cluster-multi-region" + disk_size_gb = 100 + num_shards = 1 + cloud_backup = true + cluster_type = "REPLICASET" + + // Provider Settings "block" + provider_name = "AWS" + provider_instance_size_name = "M10" + + // priorities are not in descending order so regions will be reordered + replication_specs { + num_shards = 1 + regions_config { + region_name = "US_WEST_2" + electable_nodes = 3 + priority = 6 + read_only_nodes = 0 + } + regions_config { + region_name = "US_WEST_1" + electable_nodes = 1 + priority = 5 + } + regions_config { + region_name = "US_EAST_1" + electable_nodes = 3 + priority = 7 + read_only_nodes = 0 + } + } +} diff --git a/internal/convert/testdata/clu2adv/multi_region.out.tf b/internal/convert/testdata/clu2adv/multi_region.out.tf new file mode 100644 index 0000000..126205d --- /dev/null +++ b/internal/convert/testdata/clu2adv/multi_region.out.tf @@ -0,0 +1,41 @@ +resource "mongodbatlas_advanced_cluster" "multi_region" { + project_id = "1234" + name = "cluster-multi-region" + cluster_type = "REPLICASET" + + + backup_enabled = true + replication_specs = [{ + region_configs = [{ + provider_name = "AWS" + region_name = "US_EAST_1" + priority = 7 + electable_specs = { + node_count = 3 + instance_size = "M10" + disk_size_gb = 100 + } + }, { + provider_name = "AWS" + region_name = "US_WEST_2" + priority = 6 + electable_specs = { + node_count = 3 + instance_size = "M10" + disk_size_gb = 100 + } + }, { + provider_name = "AWS" + region_name = "US_WEST_1" + priority = 5 + electable_specs = { + node_count = 1 + instance_size = "M10" + disk_size_gb = 100 + } + }] + }] + + # Generated by atlas-cli-plugin-terraform. + # Please confirm that all references to this resource are updated. 
+} diff --git a/internal/convert/testdata/clu2adv/regions_config_missing_electable_nodes.in.tf b/internal/convert/testdata/clu2adv/regions_config_missing_electable_nodes.in.tf new file mode 100644 index 0000000..f35c870 --- /dev/null +++ b/internal/convert/testdata/clu2adv/regions_config_missing_electable_nodes.in.tf @@ -0,0 +1,17 @@ +resource "mongodbatlas_cluster" "clu" { + project_id = "1234" + name = "clu" + cluster_type = "REPLICASET" + provider_name = "AWS" + provider_instance_size_name = "M10" + + replication_specs { + num_shards = 1 + regions_config { + # missing electable_nodes + region_name = "US_WEST_2" + priority = 7 + read_only_nodes = 2 + } + } +} diff --git a/internal/convert/testdata/clu2adv/regions_config_missing_priority.in.tf b/internal/convert/testdata/clu2adv/regions_config_missing_priority.in.tf new file mode 100644 index 0000000..e4fa955 --- /dev/null +++ b/internal/convert/testdata/clu2adv/regions_config_missing_priority.in.tf @@ -0,0 +1,16 @@ +resource "mongodbatlas_cluster" "clu" { + project_id = "1234" + name = "clu" + cluster_type = "REPLICASET" + provider_name = "AWS" + provider_instance_size_name = "M10" + + replication_specs { + num_shards = 1 + regions_config { + region_name = "US_WEST_2" + # priority = 7 # missing priority + electable_nodes = 2 + } + } +} diff --git a/internal/convert/testdata/clu2adv/regions_config_out_of_range_priority.in.tf b/internal/convert/testdata/clu2adv/regions_config_out_of_range_priority.in.tf new file mode 100644 index 0000000..e1fe04b --- /dev/null +++ b/internal/convert/testdata/clu2adv/regions_config_out_of_range_priority.in.tf @@ -0,0 +1,16 @@ +resource "mongodbatlas_cluster" "clu" { + project_id = "1234" + name = "clu" + cluster_type = "REPLICASET" + provider_name = "AWS" + provider_instance_size_name = "M10" + + replication_specs { + num_shards = 1 + regions_config { + region_name = "US_WEST_2" + priority = 0 # range 1-7 + electable_nodes = 2 + } + } +} diff --git a/internal/convert/testdata/clu2adv/regions_config_unresolved_priority.in.tf b/internal/convert/testdata/clu2adv/regions_config_unresolved_priority.in.tf new file mode 100644 index 0000000..590febe --- /dev/null +++ b/internal/convert/testdata/clu2adv/regions_config_unresolved_priority.in.tf @@ -0,0 +1,16 @@ +resource "mongodbatlas_cluster" "clu" { + project_id = "1234" + name = "clu" + cluster_type = "REPLICASET" + provider_name = "AWS" + provider_instance_size_name = "M10" + + replication_specs { + num_shards = 1 + regions_config { + region_name = "US_WEST_2" + priority = var.priority # unresolved + electable_nodes = 2 + } + } +} diff --git a/internal/convert/testdata/clu2adv/regions_config_unsupported_dynamic.in.tf b/internal/convert/testdata/clu2adv/regions_config_unsupported_dynamic.in.tf new file mode 100644 index 0000000..7934936 --- /dev/null +++ b/internal/convert/testdata/clu2adv/regions_config_unsupported_dynamic.in.tf @@ -0,0 +1,38 @@ +resource "mongodbatlas_cluster" "dynamic_region" { + project_id = var.project_id + name = "dynamic" + num_shards = 1 + cluster_type = "REPLICASET" + provider_name = "AWS" + provider_instance_size_name = "M10" + + replication_specs { + num_shards = 1 + dynamic "regions_config" { + for_each = { + US_WEST_2 = { + electable_nodes = 3 + priority = 6 + read_only_nodes = 0 + } + US_WEST_1 = { + electable_nodes = 1 + priority = 5 + read_only_nodes = 0 + } + US_EAST_1 = { + electable_nodes = 3 + priority = 7 + read_only_nodes = 0 + } + } + content { + region_name = regions_config.key + electable_nodes = 
regions_config.value.electable_nodes
+      priority        = regions_config.value.priority
+      read_only_nodes = regions_config.value.read_only_nodes
+    }
+
+  }
+  }
+}
diff --git a/internal/convert/testdata/clu2adv/replication_specs_missing_regions_config.in.tf b/internal/convert/testdata/clu2adv/replication_specs_missing_regions_config.in.tf
new file mode 100644
index 0000000..b62adf1
--- /dev/null
+++ b/internal/convert/testdata/clu2adv/replication_specs_missing_regions_config.in.tf
@@ -0,0 +1,17 @@
+resource "mongodbatlas_cluster" "autoscaling" {
+  project_id   = var.project_id
+  name         = var.cluster_name
+  disk_size_gb = 100
+  num_shards   = 1
+  cluster_type = "REPLICASET"
+
+  replication_specs {
+    num_shards = 1
+  }
+
+  //Provider Settings "block"
+  provider_name                                   = "AWS"
+  provider_auto_scaling_compute_min_instance_size = "M10"
+  provider_auto_scaling_compute_max_instance_size = "M40"
+  provider_instance_size_name                     = "M20"
+}
diff --git a/internal/convert/testdata/clu2adv/replication_specs_unsupported_dynamic.in.tf b/internal/convert/testdata/clu2adv/replication_specs_unsupported_dynamic.in.tf
new file mode 100644
index 0000000..f3f5e00
--- /dev/null
+++ b/internal/convert/testdata/clu2adv/replication_specs_unsupported_dynamic.in.tf
@@ -0,0 +1,29 @@
+
+resource "mongodbatlas_cluster" "geo" {
+  project_id                  = "66d979971ec97b7de1ef8777"
+  name                        = "geo"
+  cluster_type                = "GEOSHARDED"
+  num_shards                  = 1
+  provider_name               = "AWS"
+  provider_instance_size_name = "M30"
+
+  dynamic "replication_specs" {
+    for_each = {
+      "Zone 1" = {
+        region_name = "US_EAST_1"
+      },
+      "Zone 2" = {
+        region_name = "US_WEST_2"
+      }
+    }
+    content {
+      zone_name  = replication_specs.key
+      num_shards = 2
+      regions_config {
+        region_name     = replication_specs.value.region_name
+        electable_nodes = 3
+        priority        = 7
+      }
+    }
+  }
+}
diff --git a/internal/hcl/hcl.go b/internal/hcl/hcl.go
index dcbc9d0..2c033f6 100644
--- a/internal/hcl/hcl.go
+++ b/internal/hcl/hcl.go
@@ -7,6 +7,7 @@ import (
 	"github.com/hashicorp/hcl/v2"
 	"github.com/hashicorp/hcl/v2/hclsyntax"
 	"github.com/hashicorp/hcl/v2/hclwrite"
+	"github.com/zclconf/go-cty/cty"
 )
 
 // MoveAttr deletes an attribute from fromBody and adds it to toBody.
@@ -37,26 +38,53 @@ func SetAttrInt(body *hclwrite.Body, attrName string, number int) {
 	body.SetAttributeRaw(attrName, tokens)
 }
 
-// TokensArrayObject creates an array with a single object.
-func TokensArrayObject(file *hclwrite.File) hclwrite.Tokens {
+// GetAttrInt tries to get an attribute value as an int.
+func GetAttrInt(attr *hclwrite.Attribute, errPrefix string) (int, error) {
+	expr, diags := hclsyntax.ParseExpression(attr.Expr().BuildTokens(nil).Bytes(), "", hcl.InitialPos)
+	if diags.HasErrors() {
+		return 0, fmt.Errorf("%s: failed to parse number: %s", errPrefix, diags.Error())
+	}
+	val, diags := expr.Value(nil)
+	if diags.HasErrors() {
+		return 0, fmt.Errorf("%s: failed to evaluate number: %s", errPrefix, diags.Error())
+	}
+	if !val.Type().Equals(cty.Number) {
+		return 0, fmt.Errorf("%s: attribute is not a number", errPrefix)
+	}
+	num, _ := val.AsBigFloat().Int64()
+	return int(num), nil
+}
+
+// TokensArray creates an array of objects.
+func TokensArray(files []*hclwrite.File) hclwrite.Tokens {
 	ret := hclwrite.Tokens{
 		{Type: hclsyntax.TokenOBrack, Bytes: []byte("[")},
 	}
-	ret = append(ret, TokensObject(file)...)
+	for i := range files {
+		ret = append(ret, TokensObject(files[i])...)
+		if i < len(files)-1 {
+			ret = append(ret, &hclwrite.Token{Type: hclsyntax.TokenComma, Bytes: []byte(",")})
+		}
+	}
 	ret = append(ret, &hclwrite.Token{Type: hclsyntax.TokenCBrack, Bytes: []byte("]")})
 	return ret
 }
 
+// TokensArraySingle creates an array of one object.
+func TokensArraySingle(file *hclwrite.File) hclwrite.Tokens {
+	return TokensArray([]*hclwrite.File{file})
+}
+
 // TokensObject creates an object.
 func TokensObject(file *hclwrite.File) hclwrite.Tokens {
 	ret := hclwrite.Tokens{
-		{Type: hclsyntax.TokenOBrack, Bytes: []byte("{")},
+		{Type: hclsyntax.TokenOBrace, Bytes: []byte("{")},
 		{Type: hclsyntax.TokenNewline, Bytes: []byte("\n")},
 	}
 	ret = append(ret, file.BuildTokens(nil)...)
 	ret = append(ret,
-		&hclwrite.Token{Type: hclsyntax.TokenCBrack, Bytes: []byte("}")})
+		&hclwrite.Token{Type: hclsyntax.TokenCBrace, Bytes: []byte("}")})
 	return ret
 }
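For context, a small usage sketch (not part of the patch) of the generalized `TokensArray` helper, again assuming it runs from within this module since `internal/hcl` is not importable from outside. It renders several region configs as the `[{...}, {...}]` tuple that `mongodbatlas_advanced_cluster` expects:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2/hclwrite"
	"github.com/zclconf/go-cty/cty"

	"github.com/mongodb-labs/atlas-cli-plugin-terraform/internal/hcl"
)

func main() {
	east := hclwrite.NewEmptyFile()
	east.Body().SetAttributeValue("region_name", cty.StringVal("US_EAST_1"))
	hcl.SetAttrInt(east.Body(), "priority", 7)

	west := hclwrite.NewEmptyFile()
	west.Body().SetAttributeValue("region_name", cty.StringVal("US_WEST_2"))
	hcl.SetAttrInt(west.Body(), "priority", 6)

	// region_configs = [{...}, {...}] -- comma-separated objects, one per
	// regions_config block of the source cluster.
	out := hclwrite.NewEmptyFile()
	out.Body().SetAttributeRaw("region_configs", hcl.TokensArray([]*hclwrite.File{east, west}))
	fmt.Print(string(out.Bytes()))
}
```

`TokensArraySingle` is the single-element case of the same helper, which is why the free-tier path switches to it without changing its output.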