Skip to content
Merged
Show file tree
Hide file tree
Changes from 12 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
237 changes: 196 additions & 41 deletions internal/hcl/hcl.go
Original file line number Diff line number Diff line change
Expand Up @@ -10,25 +10,6 @@ import (
"github.com/zclconf/go-cty/cty"
)

const (
resourceType = "resource"
cluster = "mongodbatlas_cluster"
advCluster = "mongodbatlas_advanced_cluster"
nameReplicationSpecs = "replication_specs"
nameRegionConfigs = "region_configs"
nameElectableSpecs = "electable_specs"
nameProviderRegionName = "provider_region_name"
nameRegionName = "region_name"
nameProviderName = "provider_name"
nameBackingProviderName = "backing_provider_name"
nameProviderInstanceSizeName = "provider_instance_size_name"
nameInstanceSize = "instance_size"
nameClusterType = "cluster_type"
namePriority = "priority"

errFreeCluster = "free cluster (because no " + nameReplicationSpecs + ")"
)

// ClusterToAdvancedCluster transforms all mongodbatlas_cluster definitions in a
// Terraform configuration file into mongodbatlas_advanced_cluster schema v2 definitions.
// All other resources and data sources are left untouched.
Expand All @@ -49,10 +30,13 @@ func ClusterToAdvancedCluster(config []byte) ([]byte, error) {
labels[0] = advCluster
resource.SetLabels(labels)

if isFreeTier(resourceBody) {
if err := fillFreeTier(resourceBody); err != nil {
return nil, err
}
if resourceBody.FirstMatchingBlock(nameReplicationSpecs, nil) != nil {
err = fillReplicationSpecs(resourceBody)
} else {
err = fillFreeTier(resourceBody)
}
if err != nil {
return nil, err
}

resourceBody.AppendNewline()
Expand All @@ -62,30 +46,23 @@ func ClusterToAdvancedCluster(config []byte) ([]byte, error) {
return parser.Bytes(), nil
}

// isFreeTier reports whether the cluster definition is a free-tier cluster,
// which is recognized by the absence of any replication_specs block.
func isFreeTier(body *hclwrite.Body) bool {
	replicationSpecs := body.FirstMatchingBlock(nameReplicationSpecs, nil)
	return replicationSpecs == nil
}

// fillFreeTier is the entry point to convert clusters in free tier
func fillFreeTier(body *hclwrite.Body) error {
const (
valClusterType = "REPLICASET"
valPriority = 7
)
body.SetAttributeValue(nameClusterType, cty.StringVal(valClusterType))
regionConfig := hclwrite.NewEmptyFile()
regionConfigBody := regionConfig.Body()
setAttrInt(regionConfigBody, "priority", valPriority)
if err := moveAttribute(nameProviderRegionName, nameRegionName, body, regionConfigBody, errFreeCluster); err != nil {
if err := moveAttr(body, regionConfigBody, nameProviderRegionName, nameRegionName, errFreeCluster); err != nil {
return err
}
if err := moveAttribute(nameProviderName, nameProviderName, body, regionConfigBody, errFreeCluster); err != nil {
if err := moveAttr(body, regionConfigBody, nameProviderName, nameProviderName, errFreeCluster); err != nil {
return err
}
if err := moveAttribute(nameBackingProviderName, nameBackingProviderName, body, regionConfigBody, errFreeCluster); err != nil {
if err := moveAttr(body, regionConfigBody, nameBackingProviderName, nameBackingProviderName, errFreeCluster); err != nil {
return err
}
electableSpec := hclwrite.NewEmptyFile()
if err := moveAttribute(nameProviderInstanceSizeName, nameInstanceSize, body, electableSpec.Body(), errFreeCluster); err != nil {
if err := moveAttr(body, electableSpec.Body(), nameProviderInstanceSizeName, nameInstanceSize, errFreeCluster); err != nil {
return err
}
regionConfigBody.SetAttributeRaw(nameElectableSpecs, tokensObject(electableSpec))
Expand All @@ -96,16 +73,147 @@ func fillFreeTier(body *hclwrite.Body) error {
return nil
}

func moveAttribute(fromAttrName, toAttrName string, fromBody, toBody *hclwrite.Body, errPrefix string) error {
attr := fromBody.GetAttribute(fromAttrName)
if attr == nil {
return fmt.Errorf("%s: attribute %s not found", errPrefix, fromAttrName)
// fillReplicationSpecs is the entry point to convert clusters with replication_specs (all but free tier)
func fillReplicationSpecs(body *hclwrite.Body) error {
root, errRoot := extractRootAttrs(body, errRepSpecs)
if errRoot != nil {
return errRoot
}
srcReplicationSpecs := body.FirstMatchingBlock(nameReplicationSpecs, nil)
srcConfig := srcReplicationSpecs.Body().FirstMatchingBlock(nameRegionsConfig, nil)
if srcConfig == nil {
return fmt.Errorf("%s: %s not found", errRepSpecs, nameRegionsConfig)
}

body.RemoveAttribute(nameNumShards) // num_shards in root is not relevant, only in replication_specs
// it is ok for moveAttr to fail here because cloud_backup is optional
_ = moveAttr(body, body, nameCloudBackup, nameBackupEnabled, errRepSpecs)

replicationSpec := hclwrite.NewEmptyFile()
config, errConfig := getRegionConfigs(srcConfig, root)
if errConfig != nil {
return errConfig
}
fromBody.RemoveAttribute(fromAttrName)
toBody.SetAttributeRaw(toAttrName, attr.Expr().BuildTokens(nil))
replicationSpec.Body().SetAttributeRaw(nameRegionConfigs, config)
body.SetAttributeRaw(nameReplicationSpecs, tokensArrayObject(replicationSpec))

body.RemoveBlock(srcReplicationSpecs)
return nil
}

// extractRootAttrs deletes the attributes common to all replication_specs/regions_config
// from the resource body and returns their expression tokens, split into required
// and optional attributes. Missing required attributes are an error; missing
// optional attributes are simply omitted from the result.
func extractRootAttrs(body *hclwrite.Body, errPrefix string) (attrVals, error) {
	required := []string{
		nameProviderName,
		nameProviderInstanceSizeName,
	}
	optional := []string{
		nameDiskSizeGB,
		nameAutoScalingDiskGBEnabled,
		nameAutoScalingComputeEnabled,
		nameProviderAutoScalingComputeMinInstanceSize,
		nameProviderAutoScalingComputeMaxInstanceSize,
		nameAutoScalingComputeScaleDownEnabled,
	}
	vals := attrVals{
		req: make(map[string]hclwrite.Tokens, len(required)),
		opt: make(map[string]hclwrite.Tokens, len(optional)),
	}
	for _, attrName := range required {
		tokens, err := extractAttr(body, attrName, errPrefix)
		if err != nil {
			return attrVals{}, err
		}
		vals.req[attrName] = tokens
	}
	for _, attrName := range optional {
		// optional attributes may legitimately be absent, so the error is ignored
		if tokens, _ := extractAttr(body, attrName, errPrefix); tokens != nil {
			vals.opt[attrName] = tokens
		}
	}
	return vals, nil
}

// getRegionConfigs builds the region_configs attribute tokens for the advanced
// cluster from the legacy regions_config block and the root-level attributes.
func getRegionConfigs(srcConfig *hclwrite.Block, root attrVals) (hclwrite.Tokens, error) {
	out := hclwrite.NewEmptyFile()
	outBody := out.Body()
	srcBody := srcConfig.Body()

	outBody.SetAttributeRaw(nameProviderName, root.req[nameProviderName])
	// region_name and priority keep the same attribute names in the new schema
	for _, attrName := range []string{nameRegionName, namePriority} {
		if err := moveAttr(srcBody, outBody, attrName, attrName, errRepSpecs); err != nil {
			return nil, err
		}
	}
	if autoScaling := getAutoScalingOpt(root.opt); autoScaling != nil {
		outBody.SetAttributeRaw(nameAutoScaling, autoScaling)
	}
	electableSpecs, err := getElectableSpecs(srcConfig, root)
	if err != nil {
		return nil, err
	}
	outBody.SetAttributeRaw(nameElectableSpecs, electableSpecs)
	return tokensArrayObject(out), nil
}

// getElectableSpecs builds the electable_specs object tokens from the legacy
// regions_config block (node count) and the root attributes (instance size and,
// when present, disk size).
func getElectableSpecs(srcConfig *hclwrite.Block, root attrVals) (hclwrite.Tokens, error) {
	specs := hclwrite.NewEmptyFile()
	specsBody := specs.Body()
	if err := moveAttr(srcConfig.Body(), specsBody, nameElectableNodes, nameNodeCount, errRepSpecs); err != nil {
		return nil, err
	}
	specsBody.SetAttributeRaw(nameInstanceSize, root.req[nameProviderInstanceSizeName])
	if diskSize := root.opt[nameDiskSizeGB]; diskSize != nil {
		specsBody.SetAttributeRaw(nameDiskSizeGB, diskSize)
	}
	return tokensObject(specs), nil
}

// getAutoScalingOpt builds the auto_scaling object tokens from the optional
// root attributes, renaming each legacy attribute to its advanced-cluster name.
// It returns nil when none of the auto-scaling attributes are present.
func getAutoScalingOpt(opt map[string]hclwrite.Tokens) hclwrite.Tokens {
	// a slice of {old name, new name} pairs keeps the output order deterministic,
	// which a map would not
	renames := [][2]string{
		{nameAutoScalingDiskGBEnabled, nameDiskGBEnabled},
		{nameAutoScalingComputeEnabled, nameComputeEnabled},
		{nameProviderAutoScalingComputeMinInstanceSize, nameComputeMinInstanceSize},
		{nameProviderAutoScalingComputeMaxInstanceSize, nameComputeMaxInstanceSize},
		{nameAutoScalingComputeScaleDownEnabled, nameComputeScaleDownEnabled},
	}
	file := hclwrite.NewEmptyFile()
	hasAttrs := false
	for _, rename := range renames {
		tokens := opt[rename[0]]
		if tokens == nil {
			continue
		}
		file.Body().SetAttributeRaw(rename[1], tokens)
		hasAttrs = true
	}
	if !hasAttrs {
		return nil
	}
	return tokensObject(file)
}

// moveAttr deletes the attribute fromAttrName from fromBody and adds its
// expression tokens to toBody under toAttrName.
// It returns an error (prefixed with errPrefix) when the attribute is not found.
func moveAttr(fromBody, toBody *hclwrite.Body, fromAttrName, toAttrName, errPrefix string) error {
	tokens, err := extractAttr(fromBody, fromAttrName, errPrefix)
	// idiomatic early return: handle the error first, keep the happy path left-aligned
	if err != nil {
		return err
	}
	toBody.SetAttributeRaw(toAttrName, tokens)
	return nil
}

// extractAttr deletes an attribute from body and returns its expression tokens.
// It returns an error (prefixed with errPrefix) when the attribute is not found.
func extractAttr(body *hclwrite.Body, attrName, errPrefix string) (hclwrite.Tokens, error) {
	attr := body.GetAttribute(attrName)
	if attr == nil {
		return nil, fmt.Errorf("%s: attribute %s not found", errPrefix, attrName)
	}
	// capture the expression tokens before removing the attribute from the body
	tokens := attr.Expr().BuildTokens(nil)
	body.RemoveAttribute(attrName)
	return tokens, nil
}

func setAttrInt(body *hclwrite.Body, attrName string, number int) {
tokens := hclwrite.Tokens{
{Type: hclsyntax.TokenNumberLit, Bytes: []byte(strconv.Itoa(number))},
Expand Down Expand Up @@ -148,3 +256,50 @@ func getParser(config []byte) (*hclwrite.File, error) {
}
return parser, nil
}

// attrVals holds attribute expression tokens extracted from the resource body,
// keyed by attribute name and split into required (req) and optional (opt) sets.
type attrVals struct {
	req map[string]hclwrite.Tokens
	opt map[string]hclwrite.Tokens
}

const (
	// Terraform block type and the source/target resource types of the conversion.
	resourceType = "resource"
	cluster      = "mongodbatlas_cluster"
	advCluster   = "mongodbatlas_advanced_cluster"

	// Attribute and block names used by mongodbatlas_cluster and
	// mongodbatlas_advanced_cluster schemas.
	nameReplicationSpecs                          = "replication_specs"
	nameRegionConfigs                             = "region_configs"
	nameRegionsConfig                             = "regions_config"
	nameElectableSpecs                            = "electable_specs"
	nameAutoScaling                               = "auto_scaling"
	nameProviderRegionName                        = "provider_region_name"
	nameRegionName                                = "region_name"
	nameProviderName                              = "provider_name"
	nameBackingProviderName                       = "backing_provider_name"
	nameProviderInstanceSizeName                  = "provider_instance_size_name"
	nameInstanceSize                              = "instance_size"
	nameClusterType                               = "cluster_type"
	namePriority                                  = "priority"
	nameNumShards                                 = "num_shards"
	nameBackupEnabled                             = "backup_enabled"
	nameCloudBackup                               = "cloud_backup"
	nameDiskSizeGB                                = "disk_size_gb"
	nameAutoScalingDiskGBEnabled                  = "auto_scaling_disk_gb_enabled"
	nameAutoScalingComputeEnabled                 = "auto_scaling_compute_enabled"
	nameAutoScalingComputeScaleDownEnabled        = "auto_scaling_compute_scale_down_enabled"
	nameProviderAutoScalingComputeMinInstanceSize = "provider_auto_scaling_compute_min_instance_size"
	nameProviderAutoScalingComputeMaxInstanceSize = "provider_auto_scaling_compute_max_instance_size"
	nameDiskGBEnabled                             = "disk_gb_enabled"
	nameComputeEnabled                            = "compute_enabled"
	nameComputeScaleDownEnabled                   = "compute_scale_down_enabled"
	nameComputeMinInstanceSize                    = "compute_min_instance_size"
	nameComputeMaxInstanceSize                    = "compute_max_instance_size"
	nameNodeCount                                 = "node_count"
	nameElectableNodes                            = "electable_nodes"

	// Default values applied to free-tier clusters during conversion.
	valClusterType = "REPLICASET"
	valPriority    = 7

	// Error-message prefixes identifying which conversion path failed.
	errFreeCluster = "free cluster (because no " + nameReplicationSpecs + ")"
	errRepSpecs    = "setting " + nameReplicationSpecs
)
22 changes: 12 additions & 10 deletions internal/hcl/hcl_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -35,15 +35,17 @@ func TestClusterToAdvancedCluster(t *testing.T) {
assert.NotEmpty(t, inputFiles)
for _, inputFile := range inputFiles {
testName := strings.TrimSuffix(filepath.Base(inputFile), inSuffix)
inConfig, err := afero.ReadFile(fs, inputFile)
require.NoError(t, err)
outConfig, err := hcl.ClusterToAdvancedCluster(inConfig)
if err == nil {
g.Assert(t, testName, outConfig)
} else {
errMsg, found := errMap[testName]
assert.True(t, found, "error not found for test %s", testName)
assert.Contains(t, err.Error(), errMsg)
}
t.Run(testName, func(t *testing.T) {
inConfig, err := afero.ReadFile(fs, inputFile)
require.NoError(t, err)
outConfig, err := hcl.ClusterToAdvancedCluster(inConfig)
if err == nil {
g.Assert(t, testName, outConfig)
} else {
errMsg, found := errMap[testName]
assert.True(t, found, "error not found for test %s", testName)
assert.Contains(t, err.Error(), errMsg)
}
})
}
}
33 changes: 33 additions & 0 deletions internal/hcl/testdata/clu2adv/autoscaling.in.tf
Original file line number Diff line number Diff line change
@@ -0,0 +1,33 @@
resource "mongodbatlas_cluster" "autoscaling" {
project_id = var.project_id
name = var.cluster_name
disk_size_gb = 100
num_shards = 1
cluster_type = "REPLICASET"

replication_specs {
num_shards = 1
regions_config {
region_name = "US_WEST_2"
electable_nodes = 3
priority = 7
read_only_nodes = 0
}
}
cloud_backup = true
auto_scaling_disk_gb_enabled = true
auto_scaling_compute_enabled = false
auto_scaling_compute_scale_down_enabled = local.scale_down

//Provider Settings "block"
provider_name = "AWS"
provider_auto_scaling_compute_min_instance_size = "M10"
provider_auto_scaling_compute_max_instance_size = "M40"
provider_instance_size_name = "M20"

lifecycle { // To simulate if there a new instance size name to avoid scale cluster down to original value
# Note that provider_instance_size_name won't exist in advanced_cluster so it's an error to refer to it,
# but plugin doesn't help here.
ignore_changes = [provider_instance_size_name]
}
}
36 changes: 36 additions & 0 deletions internal/hcl/testdata/clu2adv/autoscaling.out.tf
Original file line number Diff line number Diff line change
@@ -0,0 +1,36 @@
resource "mongodbatlas_advanced_cluster" "autoscaling" {
project_id = var.project_id
name = var.cluster_name
cluster_type = "REPLICASET"



lifecycle { // To simulate if there a new instance size name to avoid scale cluster down to original value
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I would prefer having lifecycle at the end, but understand it is because we are appending to the resource block?

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

correct, when the functionality is done, we can try to improve these formatting issues

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I've created this ticket: CLOUDP-299208

# Note that provider_instance_size_name won't exist in advanced_cluster so it's an error to refer to it,
# but plugin doesn't help here.
ignore_changes = [provider_instance_size_name]
}
backup_enabled = true
Copy link
Collaborator Author

@lantoli lantoli Feb 6, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

note that they are some empty lines that are added where replication_specs and regions_config were, and new attributes are added at the end of current resource , in this case after lifecycle.

will see if it can be improved in following PRs

replication_specs = [{
region_configs = [{
provider_name = "AWS"
region_name = "US_WEST_2"
priority = 7
auto_scaling = {
disk_gb_enabled = true
compute_enabled = false
compute_min_instance_size = "M10"
compute_max_instance_size = "M40"
compute_scale_down_enabled = local.scale_down
}
electable_specs = {
node_count = 3
instance_size = "M20"
disk_size_gb = 100
}
}]
}]

# Generated by atlas-cli-plugin-terraform.
# Please confirm that all references to this resource are updated.
}
Loading