Skip to content
Merged
Show file tree
Hide file tree
Changes from 15 commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
11 changes: 9 additions & 2 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -15,9 +15,10 @@ Install the plugin by running:
atlas plugin install github.com/mongodb-labs/atlas-cli-plugin-terraform
```

## Usage
## Convert cluster to advanced_cluster v2

### Usage

### Convert cluster to advanced_cluster v2
If you want to convert a Terraform configuration from `mongodbatlas_cluster` to `mongodbatlas_advanced_cluster` schema v2, use the following command:
```bash
atlas terraform clusterToAdvancedCluster --file in.tf --output out.tf
Expand All @@ -30,6 +31,12 @@ atlas tf clu2adv -f in.tf -o out.tf

If you want to overwrite the output file if it exists, or even use the same output file as the input file, use the `--overwriteOutput true` or the `-w` flag.

### Limitations

- The plugin doesn't support `regions_config` without `electable_nodes` as there can be some issues with `priority` when they only have `analytics_nodes` and/or `read_only_nodes`.
- `priority` is required in `regions_config` and must be a resolved number between 1 and 7, e.g. `var.priority` is not supported. This is to allow reordering them by descending priority as this is expected in `mongodbatlas_advanced_cluster`.
- `dynamic` blocks to generate `replication_specs`, `regions_config`, etc. are not supported.


## Contributing

Expand Down
2 changes: 1 addition & 1 deletion go.mod
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
module github.com/mongodb-labs/atlas-cli-plugin-terraform

go 1.23.4
go 1.23.6

require (
github.com/hashicorp/hcl/v2 v2.23.0
Expand Down
8 changes: 8 additions & 0 deletions internal/convert/const_names.go
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,8 @@ const (
nConfigSrc = "regions_config"
nElectableSpecs = "electable_specs"
nAutoScaling = "auto_scaling"
nReadOnlySpecs = "read_only_specs"
nAnalyticsSpecs = "analytics_specs"
nRegionNameSrc = "provider_region_name"
nRegionName = "region_name"
nProviderName = "provider_name"
Expand All @@ -23,11 +25,17 @@ const (
nComputeScaleDownEnabledSrc = "auto_scaling_compute_scale_down_enabled"
nComputeMinInstanceSizeSrc = "provider_auto_scaling_compute_min_instance_size"
nComputeMaxInstanceSizeSrc = "provider_auto_scaling_compute_max_instance_size"
nEBSVolumeTypeSrc = "provider_volume_type"
nDiskIOPSSrc = "provider_disk_iops"
nDiskGBEnabled = "disk_gb_enabled"
nComputeEnabled = "compute_enabled"
nComputeScaleDownEnabled = "compute_scale_down_enabled"
nComputeMinInstanceSize = "compute_min_instance_size"
nComputeMaxInstanceSize = "compute_max_instance_size"
nEBSVolumeType = "ebs_volume_type"
nDiskIOPS = "disk_iops"
nNodeCount = "node_count"
nElectableNodes = "electable_nodes"
nReadOnlyNodes = "read_only_nodes"
nAnalyticsNodes = "analytics_nodes"
)
151 changes: 125 additions & 26 deletions internal/convert/convert.go
Original file line number Diff line number Diff line change
@@ -1,7 +1,9 @@
package convert

import (
"errors"
"fmt"
"sort"

"github.com/hashicorp/hcl/v2/hclwrite"
"github.com/mongodb-labs/atlas-cli-plugin-terraform/internal/hcl"
Expand All @@ -13,9 +15,12 @@ const (
cluster = "mongodbatlas_cluster"
advCluster = "mongodbatlas_advanced_cluster"
valClusterType = "REPLICASET"
valPriority = 7
valMaxPriority = 7
valMinPriority = 1
errFreeCluster = "free cluster (because no " + nRepSpecs + ")"
errRepSpecs = "setting " + nRepSpecs
errConfigs = "setting " + nConfig
errPriority = "setting " + nPriority
)

type attrVals struct {
Expand All @@ -40,6 +45,9 @@ func ClusterToAdvancedCluster(config []byte) ([]byte, error) {
continue
}
resourceb := resource.Body()
if errDyn := checkDynamicBlock(resourceb); errDyn != nil {
return nil, errDyn
}
labels[0] = advCluster
resource.SetLabels(labels)

Expand All @@ -64,7 +72,7 @@ func fillFreeTier(resourceb *hclwrite.Body) error {
resourceb.SetAttributeValue(nClusterType, cty.StringVal(valClusterType))
config := hclwrite.NewEmptyFile()
configb := config.Body()
hcl.SetAttrInt(configb, "priority", valPriority)
hcl.SetAttrInt(configb, nPriority, valMaxPriority)
if err := hcl.MoveAttr(resourceb, configb, nRegionNameSrc, nRegionName, errFreeCluster); err != nil {
return err
}
Expand All @@ -81,59 +89,88 @@ func fillFreeTier(resourceb *hclwrite.Body) error {
configb.SetAttributeRaw(nElectableSpecs, hcl.TokensObject(electableSpec))

repSpecs := hclwrite.NewEmptyFile()
repSpecs.Body().SetAttributeRaw(nConfig, hcl.TokensArrayObject(config))
resourceb.SetAttributeRaw(nRepSpecs, hcl.TokensArrayObject(repSpecs))
repSpecs.Body().SetAttributeRaw(nConfig, hcl.TokensArraySingle(config))
resourceb.SetAttributeRaw(nRepSpecs, hcl.TokensArraySingle(repSpecs))
return nil
}

// fillReplicationSpecs is the entry point to convert clusters with replications_specs (all but free tier)
func fillReplicationSpecs(resourceb *hclwrite.Body) error {
root, errRoot := popRootAttrs(resourceb, errRepSpecs)
root, errRoot := popRootAttrs(resourceb)
if errRoot != nil {
return errRoot
}
repSpecsSrc := resourceb.FirstMatchingBlock(nRepSpecs, nil)
configSrc := repSpecsSrc.Body().FirstMatchingBlock(nConfigSrc, nil)
if configSrc == nil {
return fmt.Errorf("%s: %s not found", errRepSpecs, nConfigSrc)
}

resourceb.RemoveAttribute(nNumShards) // num_shards in root is not relevant, only in replication_specs
// ok to fail as cloud_backup is optional
_ = hcl.MoveAttr(resourceb, resourceb, nCloudBackup, nBackupEnabled, errRepSpecs)

config, errConfig := getRegionConfigs(configSrc, root)
if errConfig != nil {
return errConfig
// at least one replication_specs exists here, if not it would be a free tier cluster
repSpecsSrc := resourceb.FirstMatchingBlock(nRepSpecs, nil)
if err := checkDynamicBlock(repSpecsSrc.Body()); err != nil {
return err
}
configs, errConfigs := getRegionConfigs(repSpecsSrc, root)
if errConfigs != nil {
return errConfigs
}
repSpecs := hclwrite.NewEmptyFile()
repSpecs.Body().SetAttributeRaw(nConfig, config)
resourceb.SetAttributeRaw(nRepSpecs, hcl.TokensArrayObject(repSpecs))
repSpecs.Body().SetAttributeRaw(nConfig, configs)

resourceb.SetAttributeRaw(nRepSpecs, hcl.TokensArraySingle(repSpecs))
resourceb.RemoveBlock(repSpecsSrc)
return nil
}

func getRegionConfigs(configSrc *hclwrite.Block, root attrVals) (hclwrite.Tokens, error) {
func getRegionConfigs(repSpecsSrc *hclwrite.Block, root attrVals) (hclwrite.Tokens, error) {
var configs []*hclwrite.File
for {
configSrc := repSpecsSrc.Body().FirstMatchingBlock(nConfigSrc, nil)
if configSrc == nil {
break
}
config, err := getRegionConfig(configSrc, root)
if err != nil {
return nil, err
}
configs = append(configs, config)
repSpecsSrc.Body().RemoveBlock(configSrc)
}
if len(configs) == 0 {
return nil, fmt.Errorf("%s: %s not found", errRepSpecs, nConfigSrc)
}
sort.Slice(configs, func(i, j int) bool {
pi, _ := hcl.GetAttrInt(configs[i].Body().GetAttribute(nPriority), errPriority)
pj, _ := hcl.GetAttrInt(configs[j].Body().GetAttribute(nPriority), errPriority)
return pi > pj
})
return hcl.TokensArray(configs), nil
}

func getRegionConfig(configSrc *hclwrite.Block, root attrVals) (*hclwrite.File, error) {
file := hclwrite.NewEmptyFile()
fileb := file.Body()
fileb.SetAttributeRaw(nProviderName, root.req[nProviderName])
if err := hcl.MoveAttr(configSrc.Body(), fileb, nRegionName, nRegionName, errRepSpecs); err != nil {
return nil, err
}
if err := hcl.MoveAttr(configSrc.Body(), fileb, nPriority, nPriority, errRepSpecs); err != nil {
if err := setPriority(fileb, configSrc.Body().GetAttribute(nPriority)); err != nil {
return nil, err
}
autoScaling := getAutoScalingOpt(root.opt)
if autoScaling != nil {
fileb.SetAttributeRaw(nAutoScaling, autoScaling)
}
electableSpecs, errElect := getElectableSpecs(configSrc, root)
if errElect != nil {
return nil, errElect
}
fileb.SetAttributeRaw(nElectableSpecs, electableSpecs)
return hcl.TokensArrayObject(file), nil
if readOnly := getReadOnlyAnalyticsOpt(nReadOnlyNodes, configSrc, root); readOnly != nil {
fileb.SetAttributeRaw(nReadOnlySpecs, readOnly)
}
if analytics := getReadOnlyAnalyticsOpt(nAnalyticsNodes, configSrc, root); analytics != nil {
fileb.SetAttributeRaw(nAnalyticsSpecs, analytics)
}
if autoScaling := getAutoScalingOpt(root.opt); autoScaling != nil {
fileb.SetAttributeRaw(nAutoScaling, autoScaling)
}
return file, nil
}

func getElectableSpecs(configSrc *hclwrite.Block, root attrVals) (hclwrite.Tokens, error) {
Expand All @@ -146,9 +183,42 @@ func getElectableSpecs(configSrc *hclwrite.Block, root attrVals) (hclwrite.Token
if root.opt[nDiskSizeGB] != nil {
fileb.SetAttributeRaw(nDiskSizeGB, root.opt[nDiskSizeGB])
}
if root.opt[nEBSVolumeTypeSrc] != nil {
fileb.SetAttributeRaw(nEBSVolumeType, root.opt[nEBSVolumeTypeSrc])
}
if root.opt[nDiskIOPSSrc] != nil {
fileb.SetAttributeRaw(nDiskIOPS, root.opt[nDiskIOPSSrc])
}
return hcl.TokensObject(file), nil
}

// getReadOnlyAnalyticsOpt builds the tokens for a read_only_specs or
// analytics_specs object from the node-count attribute named countName
// (read_only_nodes or analytics_nodes) in the source regions_config block.
// It returns nil when the attribute is absent or resolves to a literal 0,
// in which case the spec is omitted from the generated region config.
func getReadOnlyAnalyticsOpt(countName string, configSrc *hclwrite.Block, root attrVals) hclwrite.Tokens {
	count := configSrc.Body().GetAttribute(countName)
	if count == nil {
		return nil
	}
	// A node count of exactly 0 means the spec should not be emitted at all.
	if countVal, errVal := hcl.GetAttrInt(count, errRepSpecs); errVal == nil && countVal == 0 {
		return nil
	}
	spec := hclwrite.NewEmptyFile()
	specb := spec.Body()
	specb.SetAttributeRaw(nNodeCount, count.Expr().BuildTokens(nil))
	specb.SetAttributeRaw(nInstanceSize, root.req[nInstanceSizeSrc])
	// Optional root-level attributes are copied in a fixed order so the
	// generated output is deterministic.
	if tokens := root.opt[nDiskSizeGB]; tokens != nil {
		specb.SetAttributeRaw(nDiskSizeGB, tokens)
	}
	if tokens := root.opt[nEBSVolumeTypeSrc]; tokens != nil {
		specb.SetAttributeRaw(nEBSVolumeType, tokens)
	}
	if tokens := root.opt[nDiskIOPSSrc]; tokens != nil {
		specb.SetAttributeRaw(nDiskIOPS, tokens)
	}
	return hcl.TokensObject(spec)
}

func getAutoScalingOpt(opt map[string]hclwrite.Tokens) hclwrite.Tokens {
var (
names = [][2]string{ // use slice instead of map to preserve order
Expand All @@ -174,33 +244,62 @@ func getAutoScalingOpt(opt map[string]hclwrite.Tokens) hclwrite.Tokens {
return hcl.TokensObject(file)
}

// checkDynamicBlock returns an error if body contains any "dynamic" block,
// since generating replication_specs/regions_config via dynamic blocks is
// not supported by the conversion.
func checkDynamicBlock(body *hclwrite.Body) error {
	for _, block := range body.Blocks() {
		if block.Type() != "dynamic" {
			continue
		}
		return errors.New("dynamic blocks are not supported")
	}
	return nil
}

// setPriority validates the priority attribute taken from a source
// regions_config block and writes it into body. The attribute must be
// present and must resolve to an integer literal within
// [valMinPriority, valMaxPriority]; unresolved expressions (e.g. variables)
// are rejected by hcl.GetAttrInt.
func setPriority(body *hclwrite.Body, priority *hclwrite.Attribute) error {
	if priority == nil {
		return fmt.Errorf("%s: %s not found", errRepSpecs, nPriority)
	}
	val, err := hcl.GetAttrInt(priority, errPriority)
	if err != nil {
		return err
	}
	if val > valMaxPriority || val < valMinPriority {
		return fmt.Errorf("%s: %s is %d but must be between %d and %d", errPriority, nPriority, val, valMinPriority, valMaxPriority)
	}
	hcl.SetAttrInt(body, nPriority, val)
	return nil
}

// popRootAttrs deletes the attributes common to all replication_specs/regions_config and returns them.
func popRootAttrs(body *hclwrite.Body, errPrefix string) (attrVals, error) {
func popRootAttrs(body *hclwrite.Body) (attrVals, error) {
var (
reqNames = []string{
nProviderName,
nInstanceSizeSrc,
}
optNames = []string{
nElectableNodes,
nReadOnlyNodes,
nAnalyticsNodes,
nDiskSizeGB,
nDiskGBEnabledSrc,
nComputeEnabledSrc,
nComputeMinInstanceSizeSrc,
nComputeMaxInstanceSizeSrc,
nComputeScaleDownEnabledSrc,
nEBSVolumeTypeSrc,
nDiskIOPSSrc,
}
req = make(map[string]hclwrite.Tokens)
opt = make(map[string]hclwrite.Tokens)
)
for _, name := range reqNames {
tokens, err := hcl.PopAttr(body, name, errPrefix)
tokens, err := hcl.PopAttr(body, name, errRepSpecs)
if err != nil {
return attrVals{}, err
}
req[name] = tokens
}
for _, name := range optNames {
tokens, _ := hcl.PopAttr(body, name, errPrefix)
tokens, _ := hcl.PopAttr(body, name, errRepSpecs)
if tokens != nil {
opt[name] = tokens
}
Expand Down
2 changes: 1 addition & 1 deletion internal/convert/convert_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,7 @@ func TestClusterToAdvancedCluster(t *testing.T) {
g.Assert(t, testName, outConfig)
} else {
errMsg, found := errMap[testName]
assert.True(t, found, "error not found for test %s", testName)
assert.True(t, found, "error not found in file %s for test %s", errFilename, testName)
assert.Contains(t, err.Error(), errMsg)
}
})
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,20 @@
resource "mongodbatlas_cluster" "ar" {
project_id = var.project_id
name = "ar"
cluster_type = "REPLICASET"
provider_name = "AWS"
provider_instance_size_name = "M10"
disk_size_gb = 90
provider_volume_type = "PROVISIONED"
provider_disk_iops = 100
replication_specs {
num_shards = 1
regions_config {
region_name = "US_EAST_1"
priority = 7
electable_nodes = 3
analytics_nodes = 2
read_only_nodes = 1
}
}
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,36 @@
resource "mongodbatlas_advanced_cluster" "ar" {
project_id = var.project_id
name = "ar"
cluster_type = "REPLICASET"
replication_specs = [{
region_configs = [{
provider_name = "AWS"
region_name = "US_EAST_1"
priority = 7
electable_specs = {
node_count = 3
instance_size = "M10"
disk_size_gb = 90
ebs_volume_type = "PROVISIONED"
disk_iops = 100
}
read_only_specs = {
node_count = 1
instance_size = "M10"
disk_size_gb = 90
ebs_volume_type = "PROVISIONED"
disk_iops = 100
}
analytics_specs = {
node_count = 2
instance_size = "M10"
disk_size_gb = 90
ebs_volume_type = "PROVISIONED"
disk_iops = 100
}
}]
}]

# Generated by atlas-cli-plugin-terraform.
# Please confirm that all references to this resource are updated.
}
Loading