diff --git a/internal/cli/clu2adv/opts.go b/internal/cli/clu2adv/opts.go index eaf0756..2939d70 100644 --- a/internal/cli/clu2adv/opts.go +++ b/internal/cli/clu2adv/opts.go @@ -3,8 +3,8 @@ package clu2adv import ( "fmt" + "github.com/mongodb-labs/atlas-cli-plugin-terraform/internal/convert" "github.com/mongodb-labs/atlas-cli-plugin-terraform/internal/file" - "github.com/mongodb-labs/atlas-cli-plugin-terraform/internal/hcl" "github.com/spf13/afero" ) @@ -30,7 +30,7 @@ func (o *opts) Run() error { if err != nil { return fmt.Errorf("failed to read file %s: %w", o.file, err) } - outConfig, err := hcl.ClusterToAdvancedCluster(inConfig) + outConfig, err := convert.ClusterToAdvancedCluster(inConfig) if err != nil { return err } diff --git a/internal/convert/const_names.go b/internal/convert/const_names.go new file mode 100644 index 0000000..71cd66a --- /dev/null +++ b/internal/convert/const_names.go @@ -0,0 +1,33 @@ +package convert + +const ( + nRepSpecs = "replication_specs" + nConfig = "region_configs" + nConfigSrc = "regions_config" + nElectableSpecs = "electable_specs" + nAutoScaling = "auto_scaling" + nRegionNameSrc = "provider_region_name" + nRegionName = "region_name" + nProviderName = "provider_name" + nBackingProviderName = "backing_provider_name" + nInstanceSizeSrc = "provider_instance_size_name" + nInstanceSize = "instance_size" + nClusterType = "cluster_type" + nPriority = "priority" + nNumShards = "num_shards" + nBackupEnabled = "backup_enabled" + nCloudBackup = "cloud_backup" + nDiskSizeGB = "disk_size_gb" + nDiskGBEnabledSrc = "auto_scaling_disk_gb_enabled" + nComputeEnabledSrc = "auto_scaling_compute_enabled" + nComputeScaleDownEnabledSrc = "auto_scaling_compute_scale_down_enabled" + nComputeMinInstanceSizeSrc = "provider_auto_scaling_compute_min_instance_size" + nComputeMaxInstanceSizeSrc = "provider_auto_scaling_compute_max_instance_size" + nDiskGBEnabled = "disk_gb_enabled" + nComputeEnabled = "compute_enabled" + nComputeScaleDownEnabled = 
"compute_scale_down_enabled" + nComputeMinInstanceSize = "compute_min_instance_size" + nComputeMaxInstanceSize = "compute_max_instance_size" + nNodeCount = "node_count" + nElectableNodes = "electable_nodes" +) diff --git a/internal/convert/convert.go b/internal/convert/convert.go new file mode 100644 index 0000000..334dea5 --- /dev/null +++ b/internal/convert/convert.go @@ -0,0 +1,209 @@ +package convert + +import ( + "fmt" + + "github.com/hashicorp/hcl/v2/hclwrite" + "github.com/mongodb-labs/atlas-cli-plugin-terraform/internal/hcl" + "github.com/zclconf/go-cty/cty" +) + +const ( + resourceType = "resource" + cluster = "mongodbatlas_cluster" + advCluster = "mongodbatlas_advanced_cluster" + valClusterType = "REPLICASET" + valPriority = 7 + errFreeCluster = "free cluster (because no " + nRepSpecs + ")" + errRepSpecs = "setting " + nRepSpecs +) + +type attrVals struct { + req map[string]hclwrite.Tokens + opt map[string]hclwrite.Tokens +} + +// ClusterToAdvancedCluster transforms all mongodbatlas_cluster definitions in a +// Terraform configuration file into mongodbatlas_advanced_cluster schema v2 definitions. +// All other resources and data sources are left untouched. +// Note: hclwrite.Tokens are used instead of cty.Value so expressions like var.region can be preserved. +// cty.Value only supports resolved values. 
+func ClusterToAdvancedCluster(config []byte) ([]byte, error) { + parser, err := hcl.GetParser(config) + if err != nil { + return nil, err + } + for _, resource := range parser.Body().Blocks() { + labels := resource.Labels() + resourceName := labels[0] + if resource.Type() != resourceType || resourceName != cluster { + continue + } + resourceb := resource.Body() + labels[0] = advCluster + resource.SetLabels(labels) + + if resourceb.FirstMatchingBlock(nRepSpecs, nil) != nil { + err = fillReplicationSpecs(resourceb) + } else { + err = fillFreeTier(resourceb) + } + if err != nil { + return nil, err + } + + resourceb.AppendNewline() + hcl.AppendComment(resourceb, "Generated by atlas-cli-plugin-terraform.") + hcl.AppendComment(resourceb, "Please confirm that all references to this resource are updated.") + } + return parser.Bytes(), nil +} + +// fillFreeTier is the entry point to convert clusters in free tier +func fillFreeTier(resourceb *hclwrite.Body) error { + resourceb.SetAttributeValue(nClusterType, cty.StringVal(valClusterType)) + config := hclwrite.NewEmptyFile() + configb := config.Body() + hcl.SetAttrInt(configb, "priority", valPriority) + if err := hcl.MoveAttr(resourceb, configb, nRegionNameSrc, nRegionName, errFreeCluster); err != nil { + return err + } + if err := hcl.MoveAttr(resourceb, configb, nProviderName, nProviderName, errFreeCluster); err != nil { + return err + } + if err := hcl.MoveAttr(resourceb, configb, nBackingProviderName, nBackingProviderName, errFreeCluster); err != nil { + return err + } + electableSpec := hclwrite.NewEmptyFile() + if err := hcl.MoveAttr(resourceb, electableSpec.Body(), nInstanceSizeSrc, nInstanceSize, errFreeCluster); err != nil { + return err + } + configb.SetAttributeRaw(nElectableSpecs, hcl.TokensObject(electableSpec)) + + repSpecs := hclwrite.NewEmptyFile() + repSpecs.Body().SetAttributeRaw(nConfig, hcl.TokensArrayObject(config)) + resourceb.SetAttributeRaw(nRepSpecs, hcl.TokensArrayObject(repSpecs)) + return nil +} + 
+// fillReplicationSpecs is the entry point to convert clusters with replication_specs (all but free tier) +func fillReplicationSpecs(resourceb *hclwrite.Body) error { + root, errRoot := popRootAttrs(resourceb, errRepSpecs) + if errRoot != nil { + return errRoot + } + repSpecsSrc := resourceb.FirstMatchingBlock(nRepSpecs, nil) + configSrc := repSpecsSrc.Body().FirstMatchingBlock(nConfigSrc, nil) + if configSrc == nil { + return fmt.Errorf("%s: %s not found", errRepSpecs, nConfigSrc) + } + + resourceb.RemoveAttribute(nNumShards) // num_shards in root is not relevant, only in replication_specs + // ok to fail as cloud_backup is optional + _ = hcl.MoveAttr(resourceb, resourceb, nCloudBackup, nBackupEnabled, errRepSpecs) + + config, errConfig := getRegionConfigs(configSrc, root) + if errConfig != nil { + return errConfig + } + repSpecs := hclwrite.NewEmptyFile() + repSpecs.Body().SetAttributeRaw(nConfig, config) + resourceb.SetAttributeRaw(nRepSpecs, hcl.TokensArrayObject(repSpecs)) + + resourceb.RemoveBlock(repSpecsSrc) + return nil +} + +func getRegionConfigs(configSrc *hclwrite.Block, root attrVals) (hclwrite.Tokens, error) { + file := hclwrite.NewEmptyFile() + fileb := file.Body() + fileb.SetAttributeRaw(nProviderName, root.req[nProviderName]) + if err := hcl.MoveAttr(configSrc.Body(), fileb, nRegionName, nRegionName, errRepSpecs); err != nil { + return nil, err + } + if err := hcl.MoveAttr(configSrc.Body(), fileb, nPriority, nPriority, errRepSpecs); err != nil { + return nil, err + } + autoScaling := getAutoScalingOpt(root.opt) + if autoScaling != nil { + fileb.SetAttributeRaw(nAutoScaling, autoScaling) + } + electableSpecs, errElect := getElectableSpecs(configSrc, root) + if errElect != nil { + return nil, errElect + } + fileb.SetAttributeRaw(nElectableSpecs, electableSpecs) + return hcl.TokensArrayObject(file), nil +} + +func getElectableSpecs(configSrc *hclwrite.Block, root attrVals) (hclwrite.Tokens, error) { + file := hclwrite.NewEmptyFile() + fileb := 
file.Body() + if err := hcl.MoveAttr(configSrc.Body(), fileb, nElectableNodes, nNodeCount, errRepSpecs); err != nil { + return nil, err + } + fileb.SetAttributeRaw(nInstanceSize, root.req[nInstanceSizeSrc]) + if root.opt[nDiskSizeGB] != nil { + fileb.SetAttributeRaw(nDiskSizeGB, root.opt[nDiskSizeGB]) + } + return hcl.TokensObject(file), nil +} + +func getAutoScalingOpt(opt map[string]hclwrite.Tokens) hclwrite.Tokens { + var ( + names = [][2]string{ // use slice instead of map to preserve order + {nDiskGBEnabledSrc, nDiskGBEnabled}, + {nComputeEnabledSrc, nComputeEnabled}, + {nComputeMinInstanceSizeSrc, nComputeMinInstanceSize}, + {nComputeMaxInstanceSizeSrc, nComputeMaxInstanceSize}, + {nComputeScaleDownEnabledSrc, nComputeScaleDownEnabled}, + } + file = hclwrite.NewEmptyFile() + found = false + ) + for _, tuple := range names { + src, dst := tuple[0], tuple[1] + if tokens := opt[src]; tokens != nil { + file.Body().SetAttributeRaw(dst, tokens) + found = true + } + } + if !found { + return nil + } + return hcl.TokensObject(file) +} + +// popRootAttrs deletes the attributes common to all replication_specs/regions_config and returns them. 
+func popRootAttrs(body *hclwrite.Body, errPrefix string) (attrVals, error) { + var ( + reqNames = []string{ + nProviderName, + nInstanceSizeSrc, + } + optNames = []string{ + nDiskSizeGB, + nDiskGBEnabledSrc, + nComputeEnabledSrc, + nComputeMinInstanceSizeSrc, + nComputeMaxInstanceSizeSrc, + nComputeScaleDownEnabledSrc, + } + req = make(map[string]hclwrite.Tokens) + opt = make(map[string]hclwrite.Tokens) + ) + for _, name := range reqNames { + tokens, err := hcl.PopAttr(body, name, errPrefix) + if err != nil { + return attrVals{}, err + } + req[name] = tokens + } + for _, name := range optNames { + tokens, _ := hcl.PopAttr(body, name, errPrefix) + if tokens != nil { + opt[name] = tokens + } + } + return attrVals{req: req, opt: opt}, nil +} diff --git a/internal/hcl/hcl_test.go b/internal/convert/convert_test.go similarity index 89% rename from internal/hcl/hcl_test.go rename to internal/convert/convert_test.go index c53056c..098814a 100644 --- a/internal/hcl/hcl_test.go +++ b/internal/convert/convert_test.go @@ -1,4 +1,4 @@ -package hcl_test +package convert_test import ( "encoding/json" @@ -6,7 +6,7 @@ import ( "strings" "testing" - "github.com/mongodb-labs/atlas-cli-plugin-terraform/internal/hcl" + "github.com/mongodb-labs/atlas-cli-plugin-terraform/internal/convert" "github.com/sebdah/goldie/v2" "github.com/spf13/afero" "github.com/stretchr/testify/assert" @@ -38,7 +38,7 @@ func TestClusterToAdvancedCluster(t *testing.T) { t.Run(testName, func(t *testing.T) { inConfig, err := afero.ReadFile(fs, inputFile) require.NoError(t, err) - outConfig, err := hcl.ClusterToAdvancedCluster(inConfig) + outConfig, err := convert.ClusterToAdvancedCluster(inConfig) if err == nil { g.Assert(t, testName, outConfig) } else { diff --git a/internal/hcl/testdata/clu2adv/autoscaling.in.tf b/internal/convert/testdata/clu2adv/autoscaling.in.tf similarity index 100% rename from internal/hcl/testdata/clu2adv/autoscaling.in.tf rename to internal/convert/testdata/clu2adv/autoscaling.in.tf 
diff --git a/internal/hcl/testdata/clu2adv/autoscaling.out.tf b/internal/convert/testdata/clu2adv/autoscaling.out.tf similarity index 100% rename from internal/hcl/testdata/clu2adv/autoscaling.out.tf rename to internal/convert/testdata/clu2adv/autoscaling.out.tf diff --git a/internal/hcl/testdata/clu2adv/autoscaling_missing_attribute.in.tf b/internal/convert/testdata/clu2adv/autoscaling_missing_attribute.in.tf similarity index 100% rename from internal/hcl/testdata/clu2adv/autoscaling_missing_attribute.in.tf rename to internal/convert/testdata/clu2adv/autoscaling_missing_attribute.in.tf diff --git a/internal/hcl/testdata/clu2adv/configuration_file_error.in.tf b/internal/convert/testdata/clu2adv/configuration_file_error.in.tf similarity index 100% rename from internal/hcl/testdata/clu2adv/configuration_file_error.in.tf rename to internal/convert/testdata/clu2adv/configuration_file_error.in.tf diff --git a/internal/hcl/testdata/clu2adv/errors.json b/internal/convert/testdata/clu2adv/errors.json similarity index 100% rename from internal/hcl/testdata/clu2adv/errors.json rename to internal/convert/testdata/clu2adv/errors.json diff --git a/internal/hcl/testdata/clu2adv/free_cluster_missing_attribute.in.tf b/internal/convert/testdata/clu2adv/free_cluster_missing_attribute.in.tf similarity index 100% rename from internal/hcl/testdata/clu2adv/free_cluster_missing_attribute.in.tf rename to internal/convert/testdata/clu2adv/free_cluster_missing_attribute.in.tf diff --git a/internal/hcl/testdata/clu2adv/free_cluster_with_count.in.tf b/internal/convert/testdata/clu2adv/free_cluster_with_count.in.tf similarity index 100% rename from internal/hcl/testdata/clu2adv/free_cluster_with_count.in.tf rename to internal/convert/testdata/clu2adv/free_cluster_with_count.in.tf diff --git a/internal/hcl/testdata/clu2adv/free_cluster_with_count.out.tf b/internal/convert/testdata/clu2adv/free_cluster_with_count.out.tf similarity index 100% rename from 
internal/hcl/testdata/clu2adv/free_cluster_with_count.out.tf rename to internal/convert/testdata/clu2adv/free_cluster_with_count.out.tf diff --git a/internal/hcl/hcl.go b/internal/hcl/hcl.go index 298b35c..dcbc9d0 100644 --- a/internal/hcl/hcl.go +++ b/internal/hcl/hcl.go @@ -7,205 +7,19 @@ import ( "github.com/hashicorp/hcl/v2" "github.com/hashicorp/hcl/v2/hclsyntax" "github.com/hashicorp/hcl/v2/hclwrite" - "github.com/zclconf/go-cty/cty" ) -// ClusterToAdvancedCluster transforms all mongodbatlas_cluster definitions in a -// Terraform configuration file into mongodbatlas_advanced_cluster schema v2 definitions. -// All other resources and data sources are left untouched. -// Note: hclwrite.Tokens are used instead of cty.Value so expressions like var.region can be preserved. -// cty.Value only supports resolved values. -func ClusterToAdvancedCluster(config []byte) ([]byte, error) { - parser, err := getParser(config) - if err != nil { - return nil, err - } - for _, resource := range parser.Body().Blocks() { - labels := resource.Labels() - resourceName := labels[0] - if resource.Type() != resourceType || resourceName != cluster { - continue - } - resourceb := resource.Body() - labels[0] = advCluster - resource.SetLabels(labels) - - if resourceb.FirstMatchingBlock(nRepSpecs, nil) != nil { - err = fillReplicationSpecs(resourceb) - } else { - err = fillFreeTier(resourceb) - } - if err != nil { - return nil, err - } - - resourceb.AppendNewline() - appendComment(resourceb, "Generated by atlas-cli-plugin-terraform.") - appendComment(resourceb, "Please confirm that all references to this resource are updated.") - } - return parser.Bytes(), nil -} - -// fillFreeTier is the entry point to convert clusters in free tier -func fillFreeTier(resourceb *hclwrite.Body) error { - resourceb.SetAttributeValue(nClusterType, cty.StringVal(valClusterType)) - config := hclwrite.NewEmptyFile() - configb := config.Body() - setAttrInt(configb, "priority", valPriority) - if err := 
moveAttr(resourceb, configb, nRegionNameSrc, nRegionName, errFreeCluster); err != nil { - return err - } - if err := moveAttr(resourceb, configb, nProviderName, nProviderName, errFreeCluster); err != nil { - return err - } - if err := moveAttr(resourceb, configb, nBackingProviderName, nBackingProviderName, errFreeCluster); err != nil { - return err - } - electableSpec := hclwrite.NewEmptyFile() - if err := moveAttr(resourceb, electableSpec.Body(), nInstanceSizeSrc, nInstanceSize, errFreeCluster); err != nil { - return err - } - configb.SetAttributeRaw(nElectableSpecs, tokensObject(electableSpec)) - - repSpecs := hclwrite.NewEmptyFile() - repSpecs.Body().SetAttributeRaw(nConfig, tokensArrayObject(config)) - resourceb.SetAttributeRaw(nRepSpecs, tokensArrayObject(repSpecs)) - return nil -} - -// fillReplicationSpecs is the entry point to convert clusters with replications_specs (all but free tier) -func fillReplicationSpecs(resourceb *hclwrite.Body) error { - root, errRoot := popRootAttrs(resourceb, errRepSpecs) - if errRoot != nil { - return errRoot - } - repSpecsSrc := resourceb.FirstMatchingBlock(nRepSpecs, nil) - configSrc := repSpecsSrc.Body().FirstMatchingBlock(nConfigSrc, nil) - if configSrc == nil { - return fmt.Errorf("%s: %s not found", errRepSpecs, nConfigSrc) - } - - resourceb.RemoveAttribute(nNumShards) // num_shards in root is not relevant, only in replication_specs - // ok to fail as cloud_backup is optional - _ = moveAttr(resourceb, resourceb, nCloudBackup, nBackupEnabled, errRepSpecs) - - config, errConfig := getRegionConfigs(configSrc, root) - if errConfig != nil { - return errConfig - } - repSpecs := hclwrite.NewEmptyFile() - repSpecs.Body().SetAttributeRaw(nConfig, config) - resourceb.SetAttributeRaw(nRepSpecs, tokensArrayObject(repSpecs)) - - resourceb.RemoveBlock(repSpecsSrc) - return nil -} - -// popRootAttrs deletes the attributes common to all replication_specs/regions_config and returns them. 
-func popRootAttrs(body *hclwrite.Body, errPrefix string) (attrVals, error) { - var ( - reqNames = []string{ - nProviderName, - nInstanceSizeSrc, - } - optNames = []string{ - nDiskSizeGB, - nDiskGBEnabledSrc, - nComputeEnabledSrc, - nComputeMinInstanceSizeSrc, - nComputeMaxInstanceSizeSrc, - nComputeScaleDownEnabledSrc, - } - req = make(map[string]hclwrite.Tokens) - opt = make(map[string]hclwrite.Tokens) - ) - for _, name := range reqNames { - tokens, err := popAttr(body, name, errPrefix) - if err != nil { - return attrVals{}, err - } - req[name] = tokens - } - for _, name := range optNames { - tokens, _ := popAttr(body, name, errPrefix) - if tokens != nil { - opt[name] = tokens - } - } - return attrVals{req: req, opt: opt}, nil -} - -func getRegionConfigs(configSrc *hclwrite.Block, root attrVals) (hclwrite.Tokens, error) { - file := hclwrite.NewEmptyFile() - fileb := file.Body() - fileb.SetAttributeRaw(nProviderName, root.req[nProviderName]) - if err := moveAttr(configSrc.Body(), fileb, nRegionName, nRegionName, errRepSpecs); err != nil { - return nil, err - } - if err := moveAttr(configSrc.Body(), fileb, nPriority, nPriority, errRepSpecs); err != nil { - return nil, err - } - autoScaling := getAutoScalingOpt(root.opt) - if autoScaling != nil { - fileb.SetAttributeRaw(nAutoScaling, autoScaling) - } - electableSpecs, errElect := getElectableSpecs(configSrc, root) - if errElect != nil { - return nil, errElect - } - fileb.SetAttributeRaw(nElectableSpecs, electableSpecs) - return tokensArrayObject(file), nil -} - -func getElectableSpecs(configSrc *hclwrite.Block, root attrVals) (hclwrite.Tokens, error) { - file := hclwrite.NewEmptyFile() - fileb := file.Body() - if err := moveAttr(configSrc.Body(), fileb, nElectableNodes, nNodeCount, errRepSpecs); err != nil { - return nil, err - } - fileb.SetAttributeRaw(nInstanceSize, root.req[nInstanceSizeSrc]) - if root.opt[nDiskSizeGB] != nil { - fileb.SetAttributeRaw(nDiskSizeGB, root.opt[nDiskSizeGB]) - } - return 
tokensObject(file), nil -} - -func getAutoScalingOpt(opt map[string]hclwrite.Tokens) hclwrite.Tokens { - var ( - names = [][2]string{ // use slice instead of map to preserve order - {nDiskGBEnabledSrc, nDiskGBEnabled}, - {nComputeEnabledSrc, nComputeEnabled}, - {nComputeMinInstanceSizeSrc, nComputeMinInstanceSize}, - {nComputeMaxInstanceSizeSrc, nComputeMaxInstanceSize}, - {nComputeScaleDownEnabledSrc, nComputeScaleDownEnabled}, - } - file = hclwrite.NewEmptyFile() - found = false - ) - for _, tuple := range names { - src, dst := tuple[0], tuple[1] - if tokens := opt[src]; tokens != nil { - file.Body().SetAttributeRaw(dst, tokens) - found = true - } - } - if !found { - return nil - } - return tokensObject(file) -} - -// popAttr deletes an attribute from fromBody and adds it to toBody. -func moveAttr(fromBody, toBody *hclwrite.Body, fromAttrName, toAttrName, errPrefix string) error { - tokens, err := popAttr(fromBody, fromAttrName, errPrefix) +// MoveAttr deletes an attribute from fromBody and adds it to toBody. +func MoveAttr(fromBody, toBody *hclwrite.Body, fromAttrName, toAttrName, errPrefix string) error { + tokens, err := PopAttr(fromBody, fromAttrName, errPrefix) if err == nil { toBody.SetAttributeRaw(toAttrName, tokens) } return err } -// popAttr deletes an attribute and returns it value. -func popAttr(body *hclwrite.Body, attrName, errPrefix string) (hclwrite.Tokens, error) { +// PopAttr deletes an attribute and returns its value. +func PopAttr(body *hclwrite.Body, attrName, errPrefix string) (hclwrite.Tokens, error) { attr := body.GetAttribute(attrName) if attr == nil { return nil, fmt.Errorf("%s: attribute %s not found", errPrefix, attrName) @@ -215,24 +29,27 @@ func popAttr(body *hclwrite.Body, attrName, errPrefix string) (hclwrite.Tokens, return tokens, nil } -func setAttrInt(body *hclwrite.Body, attrName string, number int) { +// SetAttrInt sets an attribute to a number. 
+func SetAttrInt(body *hclwrite.Body, attrName string, number int) { tokens := hclwrite.Tokens{ {Type: hclsyntax.TokenNumberLit, Bytes: []byte(strconv.Itoa(number))}, } body.SetAttributeRaw(attrName, tokens) } -func tokensArrayObject(file *hclwrite.File) hclwrite.Tokens { +// TokensArrayObject creates an array with a single object. +func TokensArrayObject(file *hclwrite.File) hclwrite.Tokens { ret := hclwrite.Tokens{ {Type: hclsyntax.TokenOBrack, Bytes: []byte("[")}, } - ret = append(ret, tokensObject(file)...) + ret = append(ret, TokensObject(file)...) ret = append(ret, &hclwrite.Token{Type: hclsyntax.TokenCBrack, Bytes: []byte("]")}) return ret } -func tokensObject(file *hclwrite.File) hclwrite.Tokens { +// TokensObject creates an object. +func TokensObject(file *hclwrite.File) hclwrite.Tokens { ret := hclwrite.Tokens{ {Type: hclsyntax.TokenOBrack, Bytes: []byte("{")}, {Type: hclsyntax.TokenNewline, Bytes: []byte("\n")}, @@ -243,64 +60,19 @@ func tokensObject(file *hclwrite.File) hclwrite.Tokens { return ret } -func appendComment(body *hclwrite.Body, comment string) { +// AppendComment adds a comment at the end of the body. 
+func AppendComment(body *hclwrite.Body, comment string) { tokens := hclwrite.Tokens{ &hclwrite.Token{Type: hclsyntax.TokenComment, Bytes: []byte("# " + comment + "\n")}, } body.AppendUnstructuredTokens(tokens) } -func getParser(config []byte) (*hclwrite.File, error) { +// GetParser returns a parser for the given config and checks HCL syntax is valid +func GetParser(config []byte) (*hclwrite.File, error) { parser, diags := hclwrite.ParseConfig(config, "", hcl.Pos{Line: 1, Column: 1}) if diags.HasErrors() { return nil, fmt.Errorf("failed to parse Terraform config file: %s", diags.Error()) } return parser, nil } - -type attrVals struct { - req map[string]hclwrite.Tokens - opt map[string]hclwrite.Tokens -} - -const ( - resourceType = "resource" - cluster = "mongodbatlas_cluster" - advCluster = "mongodbatlas_advanced_cluster" - - nRepSpecs = "replication_specs" - nConfig = "region_configs" - nConfigSrc = "regions_config" - nElectableSpecs = "electable_specs" - nAutoScaling = "auto_scaling" - nRegionNameSrc = "provider_region_name" - nRegionName = "region_name" - nProviderName = "provider_name" - nBackingProviderName = "backing_provider_name" - nInstanceSizeSrc = "provider_instance_size_name" - nInstanceSize = "instance_size" - nClusterType = "cluster_type" - nPriority = "priority" - nNumShards = "num_shards" - nBackupEnabled = "backup_enabled" - nCloudBackup = "cloud_backup" - nDiskSizeGB = "disk_size_gb" - nDiskGBEnabledSrc = "auto_scaling_disk_gb_enabled" - nComputeEnabledSrc = "auto_scaling_compute_enabled" - nComputeScaleDownEnabledSrc = "auto_scaling_compute_scale_down_enabled" - nComputeMinInstanceSizeSrc = "provider_auto_scaling_compute_min_instance_size" - nComputeMaxInstanceSizeSrc = "provider_auto_scaling_compute_max_instance_size" - nDiskGBEnabled = "disk_gb_enabled" - nComputeEnabled = "compute_enabled" - nComputeScaleDownEnabled = "compute_scale_down_enabled" - nComputeMinInstanceSize = "compute_min_instance_size" - nComputeMaxInstanceSize = 
"compute_max_instance_size" - nNodeCount = "node_count" - nElectableNodes = "electable_nodes" - - valClusterType = "REPLICASET" - valPriority = 7 - - errFreeCluster = "free cluster (because no " + nRepSpecs + ")" - errRepSpecs = "setting " + nRepSpecs -)