
Commit 19005f0

chore: Refactor to convert package (#17)
* extract convert package
* extract name consts to its own file

1 parent: b7e4ee3

13 files changed: +263 −249 lines

internal/cli/clu2adv/opts.go

Lines changed: 2 additions & 2 deletions
@@ -3,8 +3,8 @@ package clu2adv
 import (
     "fmt"
 
+    "github.com/mongodb-labs/atlas-cli-plugin-terraform/internal/convert"
     "github.com/mongodb-labs/atlas-cli-plugin-terraform/internal/file"
-    "github.com/mongodb-labs/atlas-cli-plugin-terraform/internal/hcl"
     "github.com/spf13/afero"
 )
 
@@ -30,7 +30,7 @@ func (o *opts) Run() error {
     if err != nil {
         return fmt.Errorf("failed to read file %s: %w", o.file, err)
     }
-    outConfig, err := hcl.ClusterToAdvancedCluster(inConfig)
+    outConfig, err := convert.ClusterToAdvancedCluster(inConfig)
     if err != nil {
         return err
     }

internal/convert/const_names.go

Lines changed: 33 additions & 0 deletions
@@ -0,0 +1,33 @@
+package convert
+
+const (
+    nRepSpecs = "replication_specs"
+    nConfig = "region_configs"
+    nConfigSrc = "regions_config"
+    nElectableSpecs = "electable_specs"
+    nAutoScaling = "auto_scaling"
+    nRegionNameSrc = "provider_region_name"
+    nRegionName = "region_name"
+    nProviderName = "provider_name"
+    nBackingProviderName = "backing_provider_name"
+    nInstanceSizeSrc = "provider_instance_size_name"
+    nInstanceSize = "instance_size"
+    nClusterType = "cluster_type"
+    nPriority = "priority"
+    nNumShards = "num_shards"
+    nBackupEnabled = "backup_enabled"
+    nCloudBackup = "cloud_backup"
+    nDiskSizeGB = "disk_size_gb"
+    nDiskGBEnabledSrc = "auto_scaling_disk_gb_enabled"
+    nComputeEnabledSrc = "auto_scaling_compute_enabled"
+    nComputeScaleDownEnabledSrc = "auto_scaling_compute_scale_down_enabled"
+    nComputeMinInstanceSizeSrc = "provider_auto_scaling_compute_min_instance_size"
+    nComputeMaxInstanceSizeSrc = "provider_auto_scaling_compute_max_instance_size"
+    nDiskGBEnabled = "disk_gb_enabled"
+    nComputeEnabled = "compute_enabled"
+    nComputeScaleDownEnabled = "compute_scale_down_enabled"
+    nComputeMinInstanceSize = "compute_min_instance_size"
+    nComputeMaxInstanceSize = "compute_max_instance_size"
+    nNodeCount = "node_count"
+    nElectableNodes = "electable_nodes"
+)

internal/convert/convert.go

Lines changed: 209 additions & 0 deletions
@@ -0,0 +1,209 @@
+package convert
+
+import (
+    "fmt"
+
+    "github.com/hashicorp/hcl/v2/hclwrite"
+    "github.com/mongodb-labs/atlas-cli-plugin-terraform/internal/hcl"
+    "github.com/zclconf/go-cty/cty"
+)
+
+const (
+    resourceType = "resource"
+    cluster = "mongodbatlas_cluster"
+    advCluster = "mongodbatlas_advanced_cluster"
+    valClusterType = "REPLICASET"
+    valPriority = 7
+    errFreeCluster = "free cluster (because no " + nRepSpecs + ")"
+    errRepSpecs = "setting " + nRepSpecs
+)
+
+type attrVals struct {
+    req map[string]hclwrite.Tokens
+    opt map[string]hclwrite.Tokens
+}
+
+// ClusterToAdvancedCluster transforms all mongodbatlas_cluster definitions in a
+// Terraform configuration file into mongodbatlas_advanced_cluster schema v2 definitions.
+// All other resources and data sources are left untouched.
+// Note: hclwrite.Tokens are used instead of cty.Value so expressions like var.region can be preserved.
+// cty.Value only supports resolved values.
+func ClusterToAdvancedCluster(config []byte) ([]byte, error) {
+    parser, err := hcl.GetParser(config)
+    if err != nil {
+        return nil, err
+    }
+    for _, resource := range parser.Body().Blocks() {
+        labels := resource.Labels()
+        resourceName := labels[0]
+        if resource.Type() != resourceType || resourceName != cluster {
+            continue
+        }
+        resourceb := resource.Body()
+        labels[0] = advCluster
+        resource.SetLabels(labels)
+
+        if resourceb.FirstMatchingBlock(nRepSpecs, nil) != nil {
+            err = fillReplicationSpecs(resourceb)
+        } else {
+            err = fillFreeTier(resourceb)
+        }
+        if err != nil {
+            return nil, err
+        }
+
+        resourceb.AppendNewline()
+        hcl.AppendComment(resourceb, "Generated by atlas-cli-plugin-terraform.")
+        hcl.AppendComment(resourceb, "Please confirm that all references to this resource are updated.")
+    }
+    return parser.Bytes(), nil
+}
+
+// fillFreeTier is the entry point to convert clusters in free tier
+func fillFreeTier(resourceb *hclwrite.Body) error {
+    resourceb.SetAttributeValue(nClusterType, cty.StringVal(valClusterType))
+    config := hclwrite.NewEmptyFile()
+    configb := config.Body()
+    hcl.SetAttrInt(configb, "priority", valPriority)
+    if err := hcl.MoveAttr(resourceb, configb, nRegionNameSrc, nRegionName, errFreeCluster); err != nil {
+        return err
+    }
+    if err := hcl.MoveAttr(resourceb, configb, nProviderName, nProviderName, errFreeCluster); err != nil {
+        return err
+    }
+    if err := hcl.MoveAttr(resourceb, configb, nBackingProviderName, nBackingProviderName, errFreeCluster); err != nil {
+        return err
+    }
+    electableSpec := hclwrite.NewEmptyFile()
+    if err := hcl.MoveAttr(resourceb, electableSpec.Body(), nInstanceSizeSrc, nInstanceSize, errFreeCluster); err != nil {
+        return err
+    }
+    configb.SetAttributeRaw(nElectableSpecs, hcl.TokensObject(electableSpec))
+
+    repSpecs := hclwrite.NewEmptyFile()
+    repSpecs.Body().SetAttributeRaw(nConfig, hcl.TokensArrayObject(config))
+    resourceb.SetAttributeRaw(nRepSpecs, hcl.TokensArrayObject(repSpecs))
+    return nil
+}
+
+// fillReplicationSpecs is the entry point to convert clusters with replications_specs (all but free tier)
+func fillReplicationSpecs(resourceb *hclwrite.Body) error {
+    root, errRoot := popRootAttrs(resourceb, errRepSpecs)
+    if errRoot != nil {
+        return errRoot
+    }
+    repSpecsSrc := resourceb.FirstMatchingBlock(nRepSpecs, nil)
+    configSrc := repSpecsSrc.Body().FirstMatchingBlock(nConfigSrc, nil)
+    if configSrc == nil {
+        return fmt.Errorf("%s: %s not found", errRepSpecs, nConfigSrc)
+    }
+
+    resourceb.RemoveAttribute(nNumShards) // num_shards in root is not relevant, only in replication_specs
+    // ok to fail as cloud_backup is optional
+    _ = hcl.MoveAttr(resourceb, resourceb, nCloudBackup, nBackupEnabled, errRepSpecs)
+
+    config, errConfig := getRegionConfigs(configSrc, root)
+    if errConfig != nil {
+        return errConfig
+    }
+    repSpecs := hclwrite.NewEmptyFile()
+    repSpecs.Body().SetAttributeRaw(nConfig, config)
+    resourceb.SetAttributeRaw(nRepSpecs, hcl.TokensArrayObject(repSpecs))
+
+    resourceb.RemoveBlock(repSpecsSrc)
+    return nil
+}
+
+func getRegionConfigs(configSrc *hclwrite.Block, root attrVals) (hclwrite.Tokens, error) {
+    file := hclwrite.NewEmptyFile()
+    fileb := file.Body()
+    fileb.SetAttributeRaw(nProviderName, root.req[nProviderName])
+    if err := hcl.MoveAttr(configSrc.Body(), fileb, nRegionName, nRegionName, errRepSpecs); err != nil {
+        return nil, err
+    }
+    if err := hcl.MoveAttr(configSrc.Body(), fileb, nPriority, nPriority, errRepSpecs); err != nil {
+        return nil, err
+    }
+    autoScaling := getAutoScalingOpt(root.opt)
+    if autoScaling != nil {
+        fileb.SetAttributeRaw(nAutoScaling, autoScaling)
+    }
+    electableSpecs, errElect := getElectableSpecs(configSrc, root)
+    if errElect != nil {
+        return nil, errElect
+    }
+    fileb.SetAttributeRaw(nElectableSpecs, electableSpecs)
+    return hcl.TokensArrayObject(file), nil
+}
+
+func getElectableSpecs(configSrc *hclwrite.Block, root attrVals) (hclwrite.Tokens, error) {
+    file := hclwrite.NewEmptyFile()
+    fileb := file.Body()
+    if err := hcl.MoveAttr(configSrc.Body(), fileb, nElectableNodes, nNodeCount, errRepSpecs); err != nil {
+        return nil, err
+    }
+    fileb.SetAttributeRaw(nInstanceSize, root.req[nInstanceSizeSrc])
+    if root.opt[nDiskSizeGB] != nil {
+        fileb.SetAttributeRaw(nDiskSizeGB, root.opt[nDiskSizeGB])
+    }
+    return hcl.TokensObject(file), nil
+}
+
+func getAutoScalingOpt(opt map[string]hclwrite.Tokens) hclwrite.Tokens {
+    var (
+        names = [][2]string{ // use slice instead of map to preserve order
+            {nDiskGBEnabledSrc, nDiskGBEnabled},
+            {nComputeEnabledSrc, nComputeEnabled},
+            {nComputeMinInstanceSizeSrc, nComputeMinInstanceSize},
+            {nComputeMaxInstanceSizeSrc, nComputeMaxInstanceSize},
+            {nComputeScaleDownEnabledSrc, nComputeScaleDownEnabled},
+        }
+        file = hclwrite.NewEmptyFile()
+        found = false
+    )
+    for _, tuple := range names {
+        src, dst := tuple[0], tuple[1]
+        if tokens := opt[src]; tokens != nil {
+            file.Body().SetAttributeRaw(dst, tokens)
+            found = true
+        }
+    }
+    if !found {
+        return nil
+    }
+    return hcl.TokensObject(file)
+}
+
+// popRootAttrs deletes the attributes common to all replication_specs/regions_config and returns them.
+func popRootAttrs(body *hclwrite.Body, errPrefix string) (attrVals, error) {
+    var (
+        reqNames = []string{
+            nProviderName,
+            nInstanceSizeSrc,
+        }
+        optNames = []string{
+            nDiskSizeGB,
+            nDiskGBEnabledSrc,
+            nComputeEnabledSrc,
+            nComputeMinInstanceSizeSrc,
+            nComputeMaxInstanceSizeSrc,
+            nComputeScaleDownEnabledSrc,
+        }
+        req = make(map[string]hclwrite.Tokens)
+        opt = make(map[string]hclwrite.Tokens)
+    )
+    for _, name := range reqNames {
+        tokens, err := hcl.PopAttr(body, name, errPrefix)
+        if err != nil {
+            return attrVals{}, err
+        }
+        req[name] = tokens
+    }
+    for _, name := range optNames {
+        tokens, _ := hcl.PopAttr(body, name, errPrefix)
+        if tokens != nil {
+            opt[name] = tokens
+        }
+    }
+    return attrVals{req: req, opt: opt}, nil
+}
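
For orientation, here is a minimal sketch of how code elsewhere in this module could drive the new package end to end. The main wrapper, the HCL input, and its attribute values are illustrative assumptions, not part of this commit, and internal/convert is only importable from within this module:

package main

import (
    "fmt"
    "log"

    "github.com/mongodb-labs/atlas-cli-plugin-terraform/internal/convert"
)

func main() {
    // Hypothetical free-tier cluster: no replication_specs block, so the
    // converter takes the fillFreeTier path. Expressions such as var.region
    // survive verbatim because the package operates on hclwrite.Tokens.
    in := []byte(`
resource "mongodbatlas_cluster" "free" {
  project_id                  = var.project_id
  name                        = "free-cluster"
  provider_name               = "TENANT"
  backing_provider_name       = "AWS"
  provider_region_name        = var.region
  provider_instance_size_name = "M0"
}
`)
    out, err := convert.ClusterToAdvancedCluster(in)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(string(out)) // mongodbatlas_advanced_cluster definition plus the generated comments
}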

internal/hcl/hcl_test.go renamed to internal/convert/convert_test.go

Lines changed: 3 additions & 3 deletions
@@ -1,12 +1,12 @@
-package hcl_test
+package convert_test
 
 import (
     "encoding/json"
     "path/filepath"
     "strings"
     "testing"
 
-    "github.com/mongodb-labs/atlas-cli-plugin-terraform/internal/hcl"
+    "github.com/mongodb-labs/atlas-cli-plugin-terraform/internal/convert"
     "github.com/sebdah/goldie/v2"
     "github.com/spf13/afero"
     "github.com/stretchr/testify/assert"
@@ -38,7 +38,7 @@ func TestClusterToAdvancedCluster(t *testing.T) {
         t.Run(testName, func(t *testing.T) {
             inConfig, err := afero.ReadFile(fs, inputFile)
             require.NoError(t, err)
-            outConfig, err := hcl.ClusterToAdvancedCluster(inConfig)
+            outConfig, err := convert.ClusterToAdvancedCluster(inConfig)
             if err == nil {
                 g.Assert(t, testName, outConfig)
             } else {
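
The golden-file harness carries over unchanged apart from the package and import rename. As a rough sketch, a standalone case added to convert_test.go could look like the following; the free_tier fixture name and path are hypothetical, and goldie, afero, require, and convert are already imported by that file:

func TestFreeTierGolden(t *testing.T) {
    g := goldie.New(t, goldie.WithFixtureDir("testdata"))
    // Hypothetical input fixture converted and compared against testdata/free_tier.golden.
    inConfig, err := afero.ReadFile(afero.NewOsFs(), "testdata/free_tier.tf")
    require.NoError(t, err)
    outConfig, err := convert.ClusterToAdvancedCluster(inConfig)
    require.NoError(t, err)
    g.Assert(t, "free_tier", outConfig)
}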
