
Commit b7e4ee3

feat: Converts clusters with one replication_specs, regions_config and shard (#16)
* autoscaling test
* subtests
* create fillReplicationSpecs
* moveAttribute & extractAttr
* create rootAttrs
* using maps
* split req and opt params
* autoscaling
* make vars local to funcs
* missing attribute error
* passing test
* refactor fillReplicationSpecs extracting to helper funcs
* refactor Body()
* rename consts
* small refactor
* popAttr
* rename resourceb
* resourceb in ClusterToAdvancedCluster
* popRootAttrs
1 parent 6c32a9e commit b7e4ee3

File tree: 6 files changed, +325 -65 lines changed


internal/hcl/hcl.go

Lines changed: 210 additions & 54 deletions
@@ -10,25 +10,6 @@ import (
 	"github.com/zclconf/go-cty/cty"
 )
 
-const (
-	resourceType                 = "resource"
-	cluster                      = "mongodbatlas_cluster"
-	advCluster                   = "mongodbatlas_advanced_cluster"
-	nameReplicationSpecs         = "replication_specs"
-	nameRegionConfigs            = "region_configs"
-	nameElectableSpecs           = "electable_specs"
-	nameProviderRegionName       = "provider_region_name"
-	nameRegionName               = "region_name"
-	nameProviderName             = "provider_name"
-	nameBackingProviderName      = "backing_provider_name"
-	nameProviderInstanceSizeName = "provider_instance_size_name"
-	nameInstanceSize             = "instance_size"
-	nameClusterType              = "cluster_type"
-	namePriority                 = "priority"
-
-	errFreeCluster = "free cluster (because no " + nameReplicationSpecs + ")"
-)
-
 // ClusterToAdvancedCluster transforms all mongodbatlas_cluster definitions in a
 // Terraform configuration file into mongodbatlas_advanced_cluster schema v2 definitions.
 // All other resources and data sources are left untouched.
@@ -45,67 +26,195 @@ func ClusterToAdvancedCluster(config []byte) ([]byte, error) {
 		if resource.Type() != resourceType || resourceName != cluster {
 			continue
 		}
-		resourceBody := resource.Body()
+		resourceb := resource.Body()
 		labels[0] = advCluster
 		resource.SetLabels(labels)
 
-		if isFreeTier(resourceBody) {
-			if err := fillFreeTier(resourceBody); err != nil {
-				return nil, err
-			}
+		if resourceb.FirstMatchingBlock(nRepSpecs, nil) != nil {
+			err = fillReplicationSpecs(resourceb)
+		} else {
+			err = fillFreeTier(resourceb)
+		}
+		if err != nil {
+			return nil, err
 		}
 
-		resourceBody.AppendNewline()
-		appendComment(resourceBody, "Generated by atlas-cli-plugin-terraform.")
-		appendComment(resourceBody, "Please confirm that all references to this resource are updated.")
+		resourceb.AppendNewline()
+		appendComment(resourceb, "Generated by atlas-cli-plugin-terraform.")
+		appendComment(resourceb, "Please confirm that all references to this resource are updated.")
 	}
 	return parser.Bytes(), nil
 }
 
-func isFreeTier(body *hclwrite.Body) bool {
-	return body.FirstMatchingBlock(nameReplicationSpecs, nil) == nil
-}
-
-func fillFreeTier(body *hclwrite.Body) error {
-	const (
-		valClusterType = "REPLICASET"
-		valPriority    = 7
-	)
-	body.SetAttributeValue(nameClusterType, cty.StringVal(valClusterType))
-	regionConfig := hclwrite.NewEmptyFile()
-	regionConfigBody := regionConfig.Body()
-	setAttrInt(regionConfigBody, "priority", valPriority)
-	if err := moveAttribute(nameProviderRegionName, nameRegionName, body, regionConfigBody, errFreeCluster); err != nil {
+// fillFreeTier is the entry point to convert clusters in the free tier.
+func fillFreeTier(resourceb *hclwrite.Body) error {
+	resourceb.SetAttributeValue(nClusterType, cty.StringVal(valClusterType))
+	config := hclwrite.NewEmptyFile()
+	configb := config.Body()
+	setAttrInt(configb, "priority", valPriority)
+	if err := moveAttr(resourceb, configb, nRegionNameSrc, nRegionName, errFreeCluster); err != nil {
 		return err
 	}
-	if err := moveAttribute(nameProviderName, nameProviderName, body, regionConfigBody, errFreeCluster); err != nil {
+	if err := moveAttr(resourceb, configb, nProviderName, nProviderName, errFreeCluster); err != nil {
 		return err
 	}
-	if err := moveAttribute(nameBackingProviderName, nameBackingProviderName, body, regionConfigBody, errFreeCluster); err != nil {
+	if err := moveAttr(resourceb, configb, nBackingProviderName, nBackingProviderName, errFreeCluster); err != nil {
 		return err
 	}
 	electableSpec := hclwrite.NewEmptyFile()
-	if err := moveAttribute(nameProviderInstanceSizeName, nameInstanceSize, body, electableSpec.Body(), errFreeCluster); err != nil {
+	if err := moveAttr(resourceb, electableSpec.Body(), nInstanceSizeSrc, nInstanceSize, errFreeCluster); err != nil {
 		return err
 	}
-	regionConfigBody.SetAttributeRaw(nameElectableSpecs, tokensObject(electableSpec))
+	configb.SetAttributeRaw(nElectableSpecs, tokensObject(electableSpec))
 
-	replicationSpec := hclwrite.NewEmptyFile()
-	replicationSpec.Body().SetAttributeRaw(nameRegionConfigs, tokensArrayObject(regionConfig))
-	body.SetAttributeRaw(nameReplicationSpecs, tokensArrayObject(replicationSpec))
+	repSpecs := hclwrite.NewEmptyFile()
+	repSpecs.Body().SetAttributeRaw(nConfig, tokensArrayObject(config))
+	resourceb.SetAttributeRaw(nRepSpecs, tokensArrayObject(repSpecs))
 	return nil
 }
 
-func moveAttribute(fromAttrName, toAttrName string, fromBody, toBody *hclwrite.Body, errPrefix string) error {
-	attr := fromBody.GetAttribute(fromAttrName)
-	if attr == nil {
-		return fmt.Errorf("%s: attribute %s not found", errPrefix, fromAttrName)
+// fillReplicationSpecs is the entry point to convert clusters with replication_specs (all but free tier).
+func fillReplicationSpecs(resourceb *hclwrite.Body) error {
+	root, errRoot := popRootAttrs(resourceb, errRepSpecs)
+	if errRoot != nil {
+		return errRoot
 	}
-	fromBody.RemoveAttribute(fromAttrName)
-	toBody.SetAttributeRaw(toAttrName, attr.Expr().BuildTokens(nil))
+	repSpecsSrc := resourceb.FirstMatchingBlock(nRepSpecs, nil)
+	configSrc := repSpecsSrc.Body().FirstMatchingBlock(nConfigSrc, nil)
+	if configSrc == nil {
+		return fmt.Errorf("%s: %s not found", errRepSpecs, nConfigSrc)
+	}
+
+	resourceb.RemoveAttribute(nNumShards) // num_shards in root is not relevant, only in replication_specs
+	// ok to fail as cloud_backup is optional
+	_ = moveAttr(resourceb, resourceb, nCloudBackup, nBackupEnabled, errRepSpecs)
+
+	config, errConfig := getRegionConfigs(configSrc, root)
+	if errConfig != nil {
+		return errConfig
+	}
+	repSpecs := hclwrite.NewEmptyFile()
+	repSpecs.Body().SetAttributeRaw(nConfig, config)
+	resourceb.SetAttributeRaw(nRepSpecs, tokensArrayObject(repSpecs))
+
+	resourceb.RemoveBlock(repSpecsSrc)
 	return nil
 }
 
+// popRootAttrs deletes the attributes common to all replication_specs/regions_config and returns them.
+func popRootAttrs(body *hclwrite.Body, errPrefix string) (attrVals, error) {
+	var (
+		reqNames = []string{
+			nProviderName,
+			nInstanceSizeSrc,
+		}
+		optNames = []string{
+			nDiskSizeGB,
+			nDiskGBEnabledSrc,
+			nComputeEnabledSrc,
+			nComputeMinInstanceSizeSrc,
+			nComputeMaxInstanceSizeSrc,
+			nComputeScaleDownEnabledSrc,
+		}
+		req = make(map[string]hclwrite.Tokens)
+		opt = make(map[string]hclwrite.Tokens)
+	)
+	for _, name := range reqNames {
+		tokens, err := popAttr(body, name, errPrefix)
+		if err != nil {
+			return attrVals{}, err
+		}
+		req[name] = tokens
+	}
+	for _, name := range optNames {
+		tokens, _ := popAttr(body, name, errPrefix)
+		if tokens != nil {
+			opt[name] = tokens
+		}
+	}
+	return attrVals{req: req, opt: opt}, nil
+}
+
+func getRegionConfigs(configSrc *hclwrite.Block, root attrVals) (hclwrite.Tokens, error) {
+	file := hclwrite.NewEmptyFile()
+	fileb := file.Body()
+	fileb.SetAttributeRaw(nProviderName, root.req[nProviderName])
+	if err := moveAttr(configSrc.Body(), fileb, nRegionName, nRegionName, errRepSpecs); err != nil {
+		return nil, err
+	}
+	if err := moveAttr(configSrc.Body(), fileb, nPriority, nPriority, errRepSpecs); err != nil {
+		return nil, err
+	}
+	autoScaling := getAutoScalingOpt(root.opt)
+	if autoScaling != nil {
+		fileb.SetAttributeRaw(nAutoScaling, autoScaling)
+	}
+	electableSpecs, errElect := getElectableSpecs(configSrc, root)
+	if errElect != nil {
+		return nil, errElect
+	}
+	fileb.SetAttributeRaw(nElectableSpecs, electableSpecs)
+	return tokensArrayObject(file), nil
+}
+
+func getElectableSpecs(configSrc *hclwrite.Block, root attrVals) (hclwrite.Tokens, error) {
+	file := hclwrite.NewEmptyFile()
+	fileb := file.Body()
+	if err := moveAttr(configSrc.Body(), fileb, nElectableNodes, nNodeCount, errRepSpecs); err != nil {
+		return nil, err
+	}
+	fileb.SetAttributeRaw(nInstanceSize, root.req[nInstanceSizeSrc])
+	if root.opt[nDiskSizeGB] != nil {
+		fileb.SetAttributeRaw(nDiskSizeGB, root.opt[nDiskSizeGB])
+	}
+	return tokensObject(file), nil
+}
+
+func getAutoScalingOpt(opt map[string]hclwrite.Tokens) hclwrite.Tokens {
+	var (
+		names = [][2]string{ // use slice instead of map to preserve order
+			{nDiskGBEnabledSrc, nDiskGBEnabled},
+			{nComputeEnabledSrc, nComputeEnabled},
+			{nComputeMinInstanceSizeSrc, nComputeMinInstanceSize},
+			{nComputeMaxInstanceSizeSrc, nComputeMaxInstanceSize},
+			{nComputeScaleDownEnabledSrc, nComputeScaleDownEnabled},
+		}
+		file  = hclwrite.NewEmptyFile()
+		found = false
+	)
+	for _, tuple := range names {
+		src, dst := tuple[0], tuple[1]
+		if tokens := opt[src]; tokens != nil {
+			file.Body().SetAttributeRaw(dst, tokens)
+			found = true
+		}
+	}
+	if !found {
+		return nil
+	}
+	return tokensObject(file)
+}
+
+// moveAttr deletes an attribute from fromBody and adds it to toBody.
+func moveAttr(fromBody, toBody *hclwrite.Body, fromAttrName, toAttrName, errPrefix string) error {
+	tokens, err := popAttr(fromBody, fromAttrName, errPrefix)
+	if err == nil {
+		toBody.SetAttributeRaw(toAttrName, tokens)
+	}
+	return err
+}
+
+// popAttr deletes an attribute and returns its value.
+func popAttr(body *hclwrite.Body, attrName, errPrefix string) (hclwrite.Tokens, error) {
+	attr := body.GetAttribute(attrName)
+	if attr == nil {
+		return nil, fmt.Errorf("%s: attribute %s not found", errPrefix, attrName)
+	}
+	tokens := attr.Expr().BuildTokens(nil)
+	body.RemoveAttribute(attrName)
+	return tokens, nil
+}
+
 func setAttrInt(body *hclwrite.Body, attrName string, number int) {
 	tokens := hclwrite.Tokens{
 		{Type: hclsyntax.TokenNumberLit, Bytes: []byte(strconv.Itoa(number))},
@@ -148,3 +257,50 @@ func getParser(config []byte) (*hclwrite.File, error) {
 	}
 	return parser, nil
 }
+
+type attrVals struct {
+	req map[string]hclwrite.Tokens
+	opt map[string]hclwrite.Tokens
+}
+
+const (
+	resourceType = "resource"
+	cluster      = "mongodbatlas_cluster"
+	advCluster   = "mongodbatlas_advanced_cluster"
+
+	nRepSpecs                   = "replication_specs"
+	nConfig                     = "region_configs"
+	nConfigSrc                  = "regions_config"
+	nElectableSpecs             = "electable_specs"
+	nAutoScaling                = "auto_scaling"
+	nRegionNameSrc              = "provider_region_name"
+	nRegionName                 = "region_name"
+	nProviderName               = "provider_name"
+	nBackingProviderName        = "backing_provider_name"
+	nInstanceSizeSrc            = "provider_instance_size_name"
+	nInstanceSize               = "instance_size"
+	nClusterType                = "cluster_type"
+	nPriority                   = "priority"
+	nNumShards                  = "num_shards"
+	nBackupEnabled              = "backup_enabled"
+	nCloudBackup                = "cloud_backup"
+	nDiskSizeGB                 = "disk_size_gb"
+	nDiskGBEnabledSrc           = "auto_scaling_disk_gb_enabled"
+	nComputeEnabledSrc          = "auto_scaling_compute_enabled"
+	nComputeScaleDownEnabledSrc = "auto_scaling_compute_scale_down_enabled"
+	nComputeMinInstanceSizeSrc  = "provider_auto_scaling_compute_min_instance_size"
+	nComputeMaxInstanceSizeSrc  = "provider_auto_scaling_compute_max_instance_size"
+	nDiskGBEnabled              = "disk_gb_enabled"
+	nComputeEnabled             = "compute_enabled"
+	nComputeScaleDownEnabled    = "compute_scale_down_enabled"
+	nComputeMinInstanceSize     = "compute_min_instance_size"
+	nComputeMaxInstanceSize     = "compute_max_instance_size"
+	nNodeCount                  = "node_count"
+	nElectableNodes             = "electable_nodes"
+
+	valClusterType = "REPLICASET"
+	valPriority    = 7
+
+	errFreeCluster = "free cluster (because no " + nRepSpecs + ")"
+	errRepSpecs    = "setting " + nRepSpecs
+)
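For reference, the free-tier branch (fillFreeTier, refactored above but apparently unchanged in behavior) maps a cluster without replication_specs roughly as follows. This is a hedged sketch: the input resource and its name are illustrative rather than taken from this commit's test data, the generated trailing comments are omitted, and hclwrite's attribute ordering and alignment may differ.

# Illustrative free-tier input (no replication_specs block)
resource "mongodbatlas_cluster" "free" {
  project_id                  = var.project_id
  name                        = "free-cluster"
  provider_name               = "TENANT"
  backing_provider_name       = "AWS"
  provider_region_name        = "US_EAST_1"
  provider_instance_size_name = "M0"
}

# Approximate result of the conversion
resource "mongodbatlas_advanced_cluster" "free" {
  project_id   = var.project_id
  name         = "free-cluster"
  cluster_type = "REPLICASET"

  replication_specs = [{
    region_configs = [{
      priority              = 7
      region_name           = "US_EAST_1"
      provider_name         = "TENANT"
      backing_provider_name = "AWS"
      electable_specs = {
        instance_size = "M0"
      }
    }]
  }]
}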

internal/hcl/hcl_test.go

Lines changed: 12 additions & 10 deletions
@@ -35,15 +35,17 @@ func TestClusterToAdvancedCluster(t *testing.T) {
 	assert.NotEmpty(t, inputFiles)
 	for _, inputFile := range inputFiles {
 		testName := strings.TrimSuffix(filepath.Base(inputFile), inSuffix)
-		inConfig, err := afero.ReadFile(fs, inputFile)
-		require.NoError(t, err)
-		outConfig, err := hcl.ClusterToAdvancedCluster(inConfig)
-		if err == nil {
-			g.Assert(t, testName, outConfig)
-		} else {
-			errMsg, found := errMap[testName]
-			assert.True(t, found, "error not found for test %s", testName)
-			assert.Contains(t, err.Error(), errMsg)
-		}
+		t.Run(testName, func(t *testing.T) {
+			inConfig, err := afero.ReadFile(fs, inputFile)
+			require.NoError(t, err)
+			outConfig, err := hcl.ClusterToAdvancedCluster(inConfig)
+			if err == nil {
+				g.Assert(t, testName, outConfig)
+			} else {
+				errMsg, found := errMap[testName]
+				assert.True(t, found, "error not found for test %s", testName)
+				assert.Contains(t, err.Error(), errMsg)
+			}
+		})
 	}
 }
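With the subtest refactor, each testdata input converts in isolation: successful conversions are compared against golden files keyed by the test name, and failing inputs must have a matching substring registered in errMap. A hypothetical error-case fixture (file name and contents are assumptions for illustration, not part of this commit) could look like this:

# Hypothetical input, e.g. free_cluster_no_region.in.tf (name assumed; actual suffix depends on inSuffix)
resource "mongodbatlas_cluster" "free" {
  project_id                  = var.project_id
  name                        = "free-cluster"
  provider_name               = "TENANT"
  backing_provider_name       = "AWS"
  provider_instance_size_name = "M0"
  # provider_region_name omitted on purpose: ClusterToAdvancedCluster should fail with
  # "free cluster (because no replication_specs): attribute provider_region_name not found",
  # so errMap["free_cluster_no_region"] would need to contain (part of) that message.
}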
Lines changed: 33 additions & 0 deletions
@@ -0,0 +1,33 @@
+resource "mongodbatlas_cluster" "autoscaling" {
+  project_id   = var.project_id
+  name         = var.cluster_name
+  disk_size_gb = 100
+  num_shards   = 1
+  cluster_type = "REPLICASET"
+
+  replication_specs {
+    num_shards = 1
+    regions_config {
+      region_name     = "US_WEST_2"
+      electable_nodes = 3
+      priority        = 7
+      read_only_nodes = 0
+    }
+  }
+  cloud_backup                            = true
+  auto_scaling_disk_gb_enabled            = true
+  auto_scaling_compute_enabled            = false
+  auto_scaling_compute_scale_down_enabled = local.scale_down
+
+  // Provider Settings "block"
+  provider_name                                    = "AWS"
+  provider_auto_scaling_compute_min_instance_size = "M10"
+  provider_auto_scaling_compute_max_instance_size = "M40"
+  provider_instance_size_name                     = "M20"
+
+  lifecycle { // Simulates a new instance size name so the cluster is not scaled back down to the original value
+    # Note that provider_instance_size_name won't exist in advanced_cluster, so it's an error to refer to it,
+    # but the plugin doesn't help here.
+    ignore_changes = [provider_instance_size_name]
+  }
+}
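For orientation, this is roughly the mongodbatlas_advanced_cluster that the conversion should produce for the input above; the golden file committed alongside this test is authoritative, and hclwrite's attribute ordering, alignment, and comment syntax may differ. Note that read_only_nodes and num_shards do not appear to be carried over by this version of the code, and the lifecycle block is left untouched.

# Rough sketch of the expected conversion result (see the committed golden file for the exact output)
resource "mongodbatlas_advanced_cluster" "autoscaling" {
  project_id     = var.project_id
  name           = var.cluster_name
  cluster_type   = "REPLICASET"
  backup_enabled = true # moved from cloud_backup

  replication_specs = [{
    region_configs = [{
      provider_name = "AWS"
      region_name   = "US_WEST_2"
      priority      = 7
      auto_scaling = {
        disk_gb_enabled            = true
        compute_enabled            = false
        compute_min_instance_size  = "M10"
        compute_max_instance_size  = "M40"
        compute_scale_down_enabled = local.scale_down
      }
      electable_specs = {
        node_count    = 3
        instance_size = "M20"
        disk_size_gb  = 100
      }
    }]
  }]

  lifecycle {
    ignore_changes = [provider_instance_size_name] # still an invalid reference; the plugin does not rewrite it
  }

  # Generated by atlas-cli-plugin-terraform.
  # Please confirm that all references to this resource are updated.
}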
