Skip to content

Commit 5e717b7

Browse files
authored
feat: Convert clusters with 1 replication_spec and multiple region_configs (#18)
* failing test * multiple region configs * passing test with fixed order * fix token types * make priority mandatory * extract setPriority * getRegionConfigs * order configs by descending priority * dynamic blocks not supported * read only analytics min * read only and analytics all params * move to var block * go 1.23.6 * typo * countVal refactor * have all getSpecs together * linter
1 parent 350f098 commit 5e717b7

21 files changed

+502
-49
lines changed

README.md

Lines changed: 9 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -15,9 +15,10 @@ Install the plugin by running:
1515
atlas plugin install github.com/mongodb-labs/atlas-cli-plugin-terraform
1616
```
1717

18-
## Usage
18+
## Convert cluster to advanced_cluster v2
19+
20+
### Usage
1921

20-
### Convert cluster to advanced_cluster v2
2122
If you want to convert a Terraform configuration from `mongodbatlas_cluster` to `mongodbatlas_advanced_cluster` schema v2, use the following command:
2223
```bash
2324
atlas terraform clusterToAdvancedCluster --file in.tf --output out.tf
@@ -30,6 +31,12 @@ atlas tf clu2adv -f in.tf -o out.tf
3031

3132
If you want to overwrite the output file if it exists, or even use the same output file as the input file, use the `--overwriteOutput true` or the `-w` flag.
3233

34+
### Limitations
35+
36+
- The plugin doesn't support `regions_config` without `electable_nodes` as there can be some issues with `priority` when they only have `analytics_nodes` and/or `read_only_nodes`.
37+
- `priority` is required in `regions_config` and must be a resolved number between 1 and 7, e.g. `var.priority` is not supported. This is to allow reordering the configs by descending priority, as is expected in `mongodbatlas_advanced_cluster`.
38+
- `dynamic` blocks to generate `replication_specs`, `regions_config`, etc. are not supported.
39+
3340

3441
## Contributing
3542

go.mod

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
module github.com/mongodb-labs/atlas-cli-plugin-terraform
22

3-
go 1.23.4
3+
go 1.23.6
44

55
require (
66
github.com/hashicorp/hcl/v2 v2.23.0

internal/convert/const_names.go

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,8 @@ const (
66
nConfigSrc = "regions_config"
77
nElectableSpecs = "electable_specs"
88
nAutoScaling = "auto_scaling"
9+
nReadOnlySpecs = "read_only_specs"
10+
nAnalyticsSpecs = "analytics_specs"
911
nRegionNameSrc = "provider_region_name"
1012
nRegionName = "region_name"
1113
nProviderName = "provider_name"
@@ -23,11 +25,17 @@ const (
2325
nComputeScaleDownEnabledSrc = "auto_scaling_compute_scale_down_enabled"
2426
nComputeMinInstanceSizeSrc = "provider_auto_scaling_compute_min_instance_size"
2527
nComputeMaxInstanceSizeSrc = "provider_auto_scaling_compute_max_instance_size"
28+
nEBSVolumeTypeSrc = "provider_volume_type"
29+
nDiskIOPSSrc = "provider_disk_iops"
2630
nDiskGBEnabled = "disk_gb_enabled"
2731
nComputeEnabled = "compute_enabled"
2832
nComputeScaleDownEnabled = "compute_scale_down_enabled"
2933
nComputeMinInstanceSize = "compute_min_instance_size"
3034
nComputeMaxInstanceSize = "compute_max_instance_size"
35+
nEBSVolumeType = "ebs_volume_type"
36+
nDiskIOPS = "disk_iops"
3137
nNodeCount = "node_count"
3238
nElectableNodes = "electable_nodes"
39+
nReadOnlyNodes = "read_only_nodes"
40+
nAnalyticsNodes = "analytics_nodes"
3341
)

internal/convert/convert.go

Lines changed: 113 additions & 34 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,9 @@
11
package convert
22

33
import (
4+
"errors"
45
"fmt"
6+
"sort"
57

68
"github.com/hashicorp/hcl/v2/hclwrite"
79
"github.com/mongodb-labs/atlas-cli-plugin-terraform/internal/hcl"
@@ -13,9 +15,12 @@ const (
1315
cluster = "mongodbatlas_cluster"
1416
advCluster = "mongodbatlas_advanced_cluster"
1517
valClusterType = "REPLICASET"
16-
valPriority = 7
18+
valMaxPriority = 7
19+
valMinPriority = 1
1720
errFreeCluster = "free cluster (because no " + nRepSpecs + ")"
1821
errRepSpecs = "setting " + nRepSpecs
22+
errConfigs = "setting " + nConfig
23+
errPriority = "setting " + nPriority
1924
)
2025

2126
type attrVals struct {
@@ -40,6 +45,9 @@ func ClusterToAdvancedCluster(config []byte) ([]byte, error) {
4045
continue
4146
}
4247
resourceb := resource.Body()
48+
if errDyn := checkDynamicBlock(resourceb); errDyn != nil {
49+
return nil, errDyn
50+
}
4351
labels[0] = advCluster
4452
resource.SetLabels(labels)
4553

@@ -64,7 +72,7 @@ func fillFreeTier(resourceb *hclwrite.Body) error {
6472
resourceb.SetAttributeValue(nClusterType, cty.StringVal(valClusterType))
6573
config := hclwrite.NewEmptyFile()
6674
configb := config.Body()
67-
hcl.SetAttrInt(configb, "priority", valPriority)
75+
hcl.SetAttrInt(configb, nPriority, valMaxPriority)
6876
if err := hcl.MoveAttr(resourceb, configb, nRegionNameSrc, nRegionName, errFreeCluster); err != nil {
6977
return err
7078
}
@@ -81,71 +89,113 @@ func fillFreeTier(resourceb *hclwrite.Body) error {
8189
configb.SetAttributeRaw(nElectableSpecs, hcl.TokensObject(electableSpec))
8290

8391
repSpecs := hclwrite.NewEmptyFile()
84-
repSpecs.Body().SetAttributeRaw(nConfig, hcl.TokensArrayObject(config))
85-
resourceb.SetAttributeRaw(nRepSpecs, hcl.TokensArrayObject(repSpecs))
92+
repSpecs.Body().SetAttributeRaw(nConfig, hcl.TokensArraySingle(config))
93+
resourceb.SetAttributeRaw(nRepSpecs, hcl.TokensArraySingle(repSpecs))
8694
return nil
8795
}
8896

8997
// fillReplicationSpecs is the entry point to convert clusters with replications_specs (all but free tier)
9098
func fillReplicationSpecs(resourceb *hclwrite.Body) error {
91-
root, errRoot := popRootAttrs(resourceb, errRepSpecs)
99+
root, errRoot := popRootAttrs(resourceb)
92100
if errRoot != nil {
93101
return errRoot
94102
}
95-
repSpecsSrc := resourceb.FirstMatchingBlock(nRepSpecs, nil)
96-
configSrc := repSpecsSrc.Body().FirstMatchingBlock(nConfigSrc, nil)
97-
if configSrc == nil {
98-
return fmt.Errorf("%s: %s not found", errRepSpecs, nConfigSrc)
99-
}
100-
101103
resourceb.RemoveAttribute(nNumShards) // num_shards in root is not relevant, only in replication_specs
102104
// ok to fail as cloud_backup is optional
103105
_ = hcl.MoveAttr(resourceb, resourceb, nCloudBackup, nBackupEnabled, errRepSpecs)
104106

105-
config, errConfig := getRegionConfigs(configSrc, root)
106-
if errConfig != nil {
107-
return errConfig
107+
// at least one replication_specs exists here, if not it would be a free tier cluster
108+
repSpecsSrc := resourceb.FirstMatchingBlock(nRepSpecs, nil)
109+
if err := checkDynamicBlock(repSpecsSrc.Body()); err != nil {
110+
return err
111+
}
112+
configs, errConfigs := getRegionConfigs(repSpecsSrc, root)
113+
if errConfigs != nil {
114+
return errConfigs
108115
}
109116
repSpecs := hclwrite.NewEmptyFile()
110-
repSpecs.Body().SetAttributeRaw(nConfig, config)
111-
resourceb.SetAttributeRaw(nRepSpecs, hcl.TokensArrayObject(repSpecs))
117+
repSpecs.Body().SetAttributeRaw(nConfig, configs)
112118

119+
resourceb.SetAttributeRaw(nRepSpecs, hcl.TokensArraySingle(repSpecs))
113120
resourceb.RemoveBlock(repSpecsSrc)
114121
return nil
115122
}
116123

117-
func getRegionConfigs(configSrc *hclwrite.Block, root attrVals) (hclwrite.Tokens, error) {
124+
func getRegionConfigs(repSpecsSrc *hclwrite.Block, root attrVals) (hclwrite.Tokens, error) {
125+
var configs []*hclwrite.File
126+
for {
127+
configSrc := repSpecsSrc.Body().FirstMatchingBlock(nConfigSrc, nil)
128+
if configSrc == nil {
129+
break
130+
}
131+
config, err := getRegionConfig(configSrc, root)
132+
if err != nil {
133+
return nil, err
134+
}
135+
configs = append(configs, config)
136+
repSpecsSrc.Body().RemoveBlock(configSrc)
137+
}
138+
if len(configs) == 0 {
139+
return nil, fmt.Errorf("%s: %s not found", errRepSpecs, nConfigSrc)
140+
}
141+
sort.Slice(configs, func(i, j int) bool {
142+
pi, _ := hcl.GetAttrInt(configs[i].Body().GetAttribute(nPriority), errPriority)
143+
pj, _ := hcl.GetAttrInt(configs[j].Body().GetAttribute(nPriority), errPriority)
144+
return pi > pj
145+
})
146+
return hcl.TokensArray(configs), nil
147+
}
148+
149+
func getRegionConfig(configSrc *hclwrite.Block, root attrVals) (*hclwrite.File, error) {
118150
file := hclwrite.NewEmptyFile()
119151
fileb := file.Body()
120152
fileb.SetAttributeRaw(nProviderName, root.req[nProviderName])
121153
if err := hcl.MoveAttr(configSrc.Body(), fileb, nRegionName, nRegionName, errRepSpecs); err != nil {
122154
return nil, err
123155
}
124-
if err := hcl.MoveAttr(configSrc.Body(), fileb, nPriority, nPriority, errRepSpecs); err != nil {
156+
if err := setPriority(fileb, configSrc.Body().GetAttribute(nPriority)); err != nil {
125157
return nil, err
126158
}
127-
autoScaling := getAutoScalingOpt(root.opt)
128-
if autoScaling != nil {
129-
fileb.SetAttributeRaw(nAutoScaling, autoScaling)
130-
}
131-
electableSpecs, errElect := getElectableSpecs(configSrc, root)
132-
if errElect != nil {
133-
return nil, errElect
159+
electableSpecs, errElec := getSpecs(nElectableNodes, configSrc, root)
160+
if errElec != nil {
161+
return nil, errElec
134162
}
135163
fileb.SetAttributeRaw(nElectableSpecs, electableSpecs)
136-
return hcl.TokensArrayObject(file), nil
164+
if readOnly, _ := getSpecs(nReadOnlyNodes, configSrc, root); readOnly != nil {
165+
fileb.SetAttributeRaw(nReadOnlySpecs, readOnly)
166+
}
167+
if analytics, _ := getSpecs(nAnalyticsNodes, configSrc, root); analytics != nil {
168+
fileb.SetAttributeRaw(nAnalyticsSpecs, analytics)
169+
}
170+
if autoScaling := getAutoScalingOpt(root.opt); autoScaling != nil {
171+
fileb.SetAttributeRaw(nAutoScaling, autoScaling)
172+
}
173+
return file, nil
137174
}
138175

139-
func getElectableSpecs(configSrc *hclwrite.Block, root attrVals) (hclwrite.Tokens, error) {
140-
file := hclwrite.NewEmptyFile()
141-
fileb := file.Body()
142-
if err := hcl.MoveAttr(configSrc.Body(), fileb, nElectableNodes, nNodeCount, errRepSpecs); err != nil {
143-
return nil, err
176+
func getSpecs(countName string, configSrc *hclwrite.Block, root attrVals) (hclwrite.Tokens, error) {
177+
var (
178+
file = hclwrite.NewEmptyFile()
179+
fileb = file.Body()
180+
count = configSrc.Body().GetAttribute(countName)
181+
)
182+
if count == nil {
183+
return nil, fmt.Errorf("%s: attribute %s not found", errRepSpecs, countName)
184+
}
185+
if countVal, errVal := hcl.GetAttrInt(count, errRepSpecs); countVal == 0 && errVal == nil {
186+
return nil, fmt.Errorf("%s: attribute %s is 0", errRepSpecs, countName)
144187
}
188+
fileb.SetAttributeRaw(nNodeCount, count.Expr().BuildTokens(nil))
145189
fileb.SetAttributeRaw(nInstanceSize, root.req[nInstanceSizeSrc])
146190
if root.opt[nDiskSizeGB] != nil {
147191
fileb.SetAttributeRaw(nDiskSizeGB, root.opt[nDiskSizeGB])
148192
}
193+
if root.opt[nEBSVolumeTypeSrc] != nil {
194+
fileb.SetAttributeRaw(nEBSVolumeType, root.opt[nEBSVolumeTypeSrc])
195+
}
196+
if root.opt[nDiskIOPSSrc] != nil {
197+
fileb.SetAttributeRaw(nDiskIOPS, root.opt[nDiskIOPSSrc])
198+
}
149199
return hcl.TokensObject(file), nil
150200
}
151201

@@ -174,33 +224,62 @@ func getAutoScalingOpt(opt map[string]hclwrite.Tokens) hclwrite.Tokens {
174224
return hcl.TokensObject(file)
175225
}
176226

227+
func checkDynamicBlock(body *hclwrite.Body) error {
228+
for _, block := range body.Blocks() {
229+
if block.Type() == "dynamic" {
230+
return errors.New("dynamic blocks are not supported")
231+
}
232+
}
233+
return nil
234+
}
235+
236+
func setPriority(body *hclwrite.Body, priority *hclwrite.Attribute) error {
237+
if priority == nil {
238+
return fmt.Errorf("%s: %s not found", errRepSpecs, nPriority)
239+
}
240+
valPriority, err := hcl.GetAttrInt(priority, errPriority)
241+
if err != nil {
242+
return err
243+
}
244+
if valPriority < valMinPriority || valPriority > valMaxPriority {
245+
return fmt.Errorf("%s: %s is %d but must be between %d and %d", errPriority, nPriority, valPriority, valMinPriority, valMaxPriority)
246+
}
247+
hcl.SetAttrInt(body, nPriority, valPriority)
248+
return nil
249+
}
250+
177251
// popRootAttrs deletes the attributes common to all replication_specs/regions_config and returns them.
178-
func popRootAttrs(body *hclwrite.Body, errPrefix string) (attrVals, error) {
252+
func popRootAttrs(body *hclwrite.Body) (attrVals, error) {
179253
var (
180254
reqNames = []string{
181255
nProviderName,
182256
nInstanceSizeSrc,
183257
}
184258
optNames = []string{
259+
nElectableNodes,
260+
nReadOnlyNodes,
261+
nAnalyticsNodes,
185262
nDiskSizeGB,
186263
nDiskGBEnabledSrc,
187264
nComputeEnabledSrc,
188265
nComputeMinInstanceSizeSrc,
189266
nComputeMaxInstanceSizeSrc,
190267
nComputeScaleDownEnabledSrc,
268+
nEBSVolumeTypeSrc,
269+
nDiskIOPSSrc,
191270
}
192271
req = make(map[string]hclwrite.Tokens)
193272
opt = make(map[string]hclwrite.Tokens)
194273
)
195274
for _, name := range reqNames {
196-
tokens, err := hcl.PopAttr(body, name, errPrefix)
275+
tokens, err := hcl.PopAttr(body, name, errRepSpecs)
197276
if err != nil {
198277
return attrVals{}, err
199278
}
200279
req[name] = tokens
201280
}
202281
for _, name := range optNames {
203-
tokens, _ := hcl.PopAttr(body, name, errPrefix)
282+
tokens, _ := hcl.PopAttr(body, name, errRepSpecs)
204283
if tokens != nil {
205284
opt[name] = tokens
206285
}

internal/convert/convert_test.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -43,7 +43,7 @@ func TestClusterToAdvancedCluster(t *testing.T) {
4343
g.Assert(t, testName, outConfig)
4444
} else {
4545
errMsg, found := errMap[testName]
46-
assert.True(t, found, "error not found for test %s", testName)
46+
assert.True(t, found, "error not found in file %s for test %s, errMsg: %v", errFilename, testName, err)
4747
assert.Contains(t, err.Error(), errMsg)
4848
}
4949
})
Lines changed: 20 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,20 @@
1+
resource "mongodbatlas_cluster" "ar" {
2+
project_id = var.project_id
3+
name = "ar"
4+
cluster_type = "REPLICASET"
5+
provider_name = "AWS"
6+
provider_instance_size_name = "M10"
7+
disk_size_gb = 90
8+
provider_volume_type = "PROVISIONED"
9+
provider_disk_iops = 100
10+
replication_specs {
11+
num_shards = 1
12+
regions_config {
13+
region_name = "US_EAST_1"
14+
priority = 7
15+
electable_nodes = 3
16+
analytics_nodes = 2
17+
read_only_nodes = 1
18+
}
19+
}
20+
}
Lines changed: 36 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,36 @@
1+
resource "mongodbatlas_advanced_cluster" "ar" {
2+
project_id = var.project_id
3+
name = "ar"
4+
cluster_type = "REPLICASET"
5+
replication_specs = [{
6+
region_configs = [{
7+
provider_name = "AWS"
8+
region_name = "US_EAST_1"
9+
priority = 7
10+
electable_specs = {
11+
node_count = 3
12+
instance_size = "M10"
13+
disk_size_gb = 90
14+
ebs_volume_type = "PROVISIONED"
15+
disk_iops = 100
16+
}
17+
read_only_specs = {
18+
node_count = 1
19+
instance_size = "M10"
20+
disk_size_gb = 90
21+
ebs_volume_type = "PROVISIONED"
22+
disk_iops = 100
23+
}
24+
analytics_specs = {
25+
node_count = 2
26+
instance_size = "M10"
27+
disk_size_gb = 90
28+
ebs_volume_type = "PROVISIONED"
29+
disk_iops = 100
30+
}
31+
}]
32+
}]
33+
34+
# Generated by atlas-cli-plugin-terraform.
35+
# Please confirm that all references to this resource are updated.
36+
}
Lines changed: 17 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,17 @@
1+
resource "mongodbatlas_cluster" "ar" {
2+
project_id = var.project_id
3+
name = "ar"
4+
cluster_type = "REPLICASET"
5+
provider_name = "AWS"
6+
provider_instance_size_name = "M10"
7+
replication_specs {
8+
num_shards = 1
9+
regions_config {
10+
region_name = "US_EAST_1"
11+
priority = 7
12+
electable_nodes = 3
13+
analytics_nodes = 2
14+
read_only_nodes = 1
15+
}
16+
}
17+
}

0 commit comments

Comments
 (0)