feat: Converts clusters with one replication_specs, regions_config and shard #16
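To make the scope concrete, here is a minimal before/after sketch of the conversion this PR implements. It is hand-written for this description rather than taken from the PR's test fixtures, so the resource label and values are invented and the exact formatting the plugin emits may differ.

# before: cluster with one replication_specs, one regions_config and one shard
resource "mongodbatlas_cluster" "this" {
  project_id                  = var.project_id
  name                        = "cluster0"
  cluster_type                = "REPLICASET"
  provider_name               = "AWS"
  provider_instance_size_name = "M10"

  replication_specs {
    num_shards = 1
    regions_config {
      region_name     = "US_EAST_1"
      electable_nodes = 3
      priority        = 7
    }
  }
}

# after: advanced_cluster (schema v2), provider and instance size pushed down into region_configs
resource "mongodbatlas_advanced_cluster" "this" {
  project_id   = var.project_id
  name         = "cluster0"
  cluster_type = "REPLICASET"
  replication_specs = [{
    region_configs = [{
      provider_name = "AWS"
      region_name   = "US_EAST_1"
      priority      = 7
      electable_specs = {
        node_count    = 3
        instance_size = "M10"
      }
    }]
  }]
}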
Changes from all commits
@@ -10,25 +10,6 @@ import (
    "github.com/zclconf/go-cty/cty"
)

const (
    resourceType = "resource"
    cluster = "mongodbatlas_cluster"
    advCluster = "mongodbatlas_advanced_cluster"
    nameReplicationSpecs = "replication_specs"
    nameRegionConfigs = "region_configs"
    nameElectableSpecs = "electable_specs"
    nameProviderRegionName = "provider_region_name"
    nameRegionName = "region_name"
    nameProviderName = "provider_name"
    nameBackingProviderName = "backing_provider_name"
    nameProviderInstanceSizeName = "provider_instance_size_name"
    nameInstanceSize = "instance_size"
    nameClusterType = "cluster_type"
    namePriority = "priority"

    errFreeCluster = "free cluster (because no " + nameReplicationSpecs + ")"
)

// ClusterToAdvancedCluster transforms all mongodbatlas_cluster definitions in a
// Terraform configuration file into mongodbatlas_advanced_cluster schema v2 definitions.
// All other resources and data sources are left untouched.

@@ -45,67 +26,195 @@ func ClusterToAdvancedCluster(config []byte) ([]byte, error) {
        if resource.Type() != resourceType || resourceName != cluster {
            continue
        }
        resourceBody := resource.Body()
        resourceb := resource.Body()
        labels[0] = advCluster
        resource.SetLabels(labels)

        if isFreeTier(resourceBody) {
            if err := fillFreeTier(resourceBody); err != nil {
                return nil, err
            }
        if resourceb.FirstMatchingBlock(nRepSpecs, nil) != nil {
            err = fillReplicationSpecs(resourceb)
        } else {
            err = fillFreeTier(resourceb)
        }
        if err != nil {
            return nil, err
        }

        resourceBody.AppendNewline()
        appendComment(resourceBody, "Generated by atlas-cli-plugin-terraform.")
        appendComment(resourceBody, "Please confirm that all references to this resource are updated.")
        resourceb.AppendNewline()
        appendComment(resourceb, "Generated by atlas-cli-plugin-terraform.")
        appendComment(resourceb, "Please confirm that all references to this resource are updated.")
    }
    return parser.Bytes(), nil
}

func isFreeTier(body *hclwrite.Body) bool {
    return body.FirstMatchingBlock(nameReplicationSpecs, nil) == nil
}

func fillFreeTier(body *hclwrite.Body) error {
    const (
        valClusterType = "REPLICASET"
        valPriority = 7
    )
    body.SetAttributeValue(nameClusterType, cty.StringVal(valClusterType))
    regionConfig := hclwrite.NewEmptyFile()
    regionConfigBody := regionConfig.Body()
    setAttrInt(regionConfigBody, "priority", valPriority)
    if err := moveAttribute(nameProviderRegionName, nameRegionName, body, regionConfigBody, errFreeCluster); err != nil {
// fillFreeTier is the entry point to convert clusters in free tier
func fillFreeTier(resourceb *hclwrite.Body) error {
    resourceb.SetAttributeValue(nClusterType, cty.StringVal(valClusterType))
    config := hclwrite.NewEmptyFile()
    configb := config.Body()
    setAttrInt(configb, "priority", valPriority)
    if err := moveAttr(resourceb, configb, nRegionNameSrc, nRegionName, errFreeCluster); err != nil {
        return err
    }
    if err := moveAttribute(nameProviderName, nameProviderName, body, regionConfigBody, errFreeCluster); err != nil {
    if err := moveAttr(resourceb, configb, nProviderName, nProviderName, errFreeCluster); err != nil {
        return err
    }
    if err := moveAttribute(nameBackingProviderName, nameBackingProviderName, body, regionConfigBody, errFreeCluster); err != nil {
    if err := moveAttr(resourceb, configb, nBackingProviderName, nBackingProviderName, errFreeCluster); err != nil {
        return err
    }
    electableSpec := hclwrite.NewEmptyFile()
    if err := moveAttribute(nameProviderInstanceSizeName, nameInstanceSize, body, electableSpec.Body(), errFreeCluster); err != nil {
    if err := moveAttr(resourceb, electableSpec.Body(), nInstanceSizeSrc, nInstanceSize, errFreeCluster); err != nil {
        return err
    }
    regionConfigBody.SetAttributeRaw(nameElectableSpecs, tokensObject(electableSpec))
    configb.SetAttributeRaw(nElectableSpecs, tokensObject(electableSpec))

    replicationSpec := hclwrite.NewEmptyFile()
    replicationSpec.Body().SetAttributeRaw(nameRegionConfigs, tokensArrayObject(regionConfig))
    body.SetAttributeRaw(nameReplicationSpecs, tokensArrayObject(replicationSpec))
    repSpecs := hclwrite.NewEmptyFile()
    repSpecs.Body().SetAttributeRaw(nConfig, tokensArrayObject(config))
    resourceb.SetAttributeRaw(nRepSpecs, tokensArrayObject(repSpecs))
    return nil
}

func moveAttribute(fromAttrName, toAttrName string, fromBody, toBody *hclwrite.Body, errPrefix string) error {
    attr := fromBody.GetAttribute(fromAttrName)
    if attr == nil {
        return fmt.Errorf("%s: attribute %s not found", errPrefix, fromAttrName)
// fillReplicationSpecs is the entry point to convert clusters with replication_specs (all but free tier)
func fillReplicationSpecs(resourceb *hclwrite.Body) error {
    root, errRoot := popRootAttrs(resourceb, errRepSpecs)
    if errRoot != nil {
        return errRoot
    }
    fromBody.RemoveAttribute(fromAttrName)
    toBody.SetAttributeRaw(toAttrName, attr.Expr().BuildTokens(nil))
    repSpecsSrc := resourceb.FirstMatchingBlock(nRepSpecs, nil)
    configSrc := repSpecsSrc.Body().FirstMatchingBlock(nConfigSrc, nil)
    if configSrc == nil {
        return fmt.Errorf("%s: %s not found", errRepSpecs, nConfigSrc)
    }

    resourceb.RemoveAttribute(nNumShards) // num_shards in root is not relevant, only in replication_specs
    // ok to fail as cloud_backup is optional
    _ = moveAttr(resourceb, resourceb, nCloudBackup, nBackupEnabled, errRepSpecs)

    config, errConfig := getRegionConfigs(configSrc, root)
    if errConfig != nil {
        return errConfig
    }
    repSpecs := hclwrite.NewEmptyFile()
    repSpecs.Body().SetAttributeRaw(nConfig, config)
    resourceb.SetAttributeRaw(nRepSpecs, tokensArrayObject(repSpecs))

    resourceb.RemoveBlock(repSpecsSrc)
    return nil
}

// popRootAttrs deletes the attributes common to all replication_specs/regions_config and returns them.
func popRootAttrs(body *hclwrite.Body, errPrefix string) (attrVals, error) {
    var (
        reqNames = []string{
            nProviderName,
            nInstanceSizeSrc,
        }
        optNames = []string{
            nDiskSizeGB,
            nDiskGBEnabledSrc,
            nComputeEnabledSrc,
            nComputeMinInstanceSizeSrc,
            nComputeMaxInstanceSizeSrc,
            nComputeScaleDownEnabledSrc,
        }
        req = make(map[string]hclwrite.Tokens)
        opt = make(map[string]hclwrite.Tokens)
    )
    for _, name := range reqNames {
        tokens, err := popAttr(body, name, errPrefix)
        if err != nil {
            return attrVals{}, err
        }
        req[name] = tokens
    }
    for _, name := range optNames {
        tokens, _ := popAttr(body, name, errPrefix)
        if tokens != nil {
            opt[name] = tokens
        }
    }
    return attrVals{req: req, opt: opt}, nil
}

func getRegionConfigs(configSrc *hclwrite.Block, root attrVals) (hclwrite.Tokens, error) {
    file := hclwrite.NewEmptyFile()
    fileb := file.Body()
    fileb.SetAttributeRaw(nProviderName, root.req[nProviderName])
    if err := moveAttr(configSrc.Body(), fileb, nRegionName, nRegionName, errRepSpecs); err != nil {
        return nil, err
    }
    if err := moveAttr(configSrc.Body(), fileb, nPriority, nPriority, errRepSpecs); err != nil {
        return nil, err
    }
    autoScaling := getAutoScalingOpt(root.opt)
    if autoScaling != nil {
        fileb.SetAttributeRaw(nAutoScaling, autoScaling)
    }
    electableSpecs, errElect := getElectableSpecs(configSrc, root)
    if errElect != nil {
        return nil, errElect
    }
    fileb.SetAttributeRaw(nElectableSpecs, electableSpecs)
    return tokensArrayObject(file), nil
}

func getElectableSpecs(configSrc *hclwrite.Block, root attrVals) (hclwrite.Tokens, error) {
    file := hclwrite.NewEmptyFile()
    fileb := file.Body()
[Resolved review thread]
Comment: specBlock?
Reply: specBlock for which var you mean?
Comment: instead of ...
Reply: b is for body. I'm using file as it's how the HCL library calls it, instead of calling it electableSpecs inside getElectableSpecs, scalingSpecs in getAutoScalingOpt, etc., as we're inside that func so it's implicit what block we're working with.
    if err := moveAttr(configSrc.Body(), fileb, nElectableNodes, nNodeCount, errRepSpecs); err != nil {
        return nil, err
    }
    fileb.SetAttributeRaw(nInstanceSize, root.req[nInstanceSizeSrc])
    if root.opt[nDiskSizeGB] != nil {
        fileb.SetAttributeRaw(nDiskSizeGB, root.opt[nDiskSizeGB])
    }
    return tokensObject(file), nil
}

func getAutoScalingOpt(opt map[string]hclwrite.Tokens) hclwrite.Tokens {
    var (
        names = [][2]string{ // use slice instead of map to preserve order
            {nDiskGBEnabledSrc, nDiskGBEnabled},
            {nComputeEnabledSrc, nComputeEnabled},
            {nComputeMinInstanceSizeSrc, nComputeMinInstanceSize},
            {nComputeMaxInstanceSizeSrc, nComputeMaxInstanceSize},
            {nComputeScaleDownEnabledSrc, nComputeScaleDownEnabled},
        }
        file = hclwrite.NewEmptyFile()
        found = false
    )
    for _, tuple := range names {
        src, dst := tuple[0], tuple[1]
        if tokens := opt[src]; tokens != nil {
            file.Body().SetAttributeRaw(dst, tokens)
            found = true
        }
    }
    if !found {
        return nil
    }
    return tokensObject(file)
}

// moveAttr deletes an attribute from fromBody and adds it to toBody.
func moveAttr(fromBody, toBody *hclwrite.Body, fromAttrName, toAttrName, errPrefix string) error {
    tokens, err := popAttr(fromBody, fromAttrName, errPrefix)
    if err == nil {
        toBody.SetAttributeRaw(toAttrName, tokens)
    }
    return err
}

// popAttr deletes an attribute and returns its value.
func popAttr(body *hclwrite.Body, attrName, errPrefix string) (hclwrite.Tokens, error) {
    attr := body.GetAttribute(attrName)
    if attr == nil {
        return nil, fmt.Errorf("%s: attribute %s not found", errPrefix, attrName)
    }
    tokens := attr.Expr().BuildTokens(nil)
    body.RemoveAttribute(attrName)
    return tokens, nil
}

func setAttrInt(body *hclwrite.Body, attrName string, number int) {
    tokens := hclwrite.Tokens{
        {Type: hclsyntax.TokenNumberLit, Bytes: []byte(strconv.Itoa(number))},

@@ -148,3 +257,50 @@ func getParser(config []byte) (*hclwrite.File, error) {
    }
    return parser, nil
}

type attrVals struct {
    req map[string]hclwrite.Tokens
    opt map[string]hclwrite.Tokens
}

const (
    resourceType = "resource"
    cluster = "mongodbatlas_cluster"
    advCluster = "mongodbatlas_advanced_cluster"

    nRepSpecs = "replication_specs"
    nConfig = "region_configs"
    nConfigSrc = "regions_config"
    nElectableSpecs = "electable_specs"
    nAutoScaling = "auto_scaling"
    nRegionNameSrc = "provider_region_name"
    [Review comment] added Src when the cluster attr name is different to adv_cluster, so it's easier to see the equivalence
    nRegionName = "region_name"
    nProviderName = "provider_name"
    nBackingProviderName = "backing_provider_name"
    nInstanceSizeSrc = "provider_instance_size_name"
    nInstanceSize = "instance_size"
    nClusterType = "cluster_type"
    nPriority = "priority"
    nNumShards = "num_shards"
    nBackupEnabled = "backup_enabled"
    nCloudBackup = "cloud_backup"
    nDiskSizeGB = "disk_size_gb"
    nDiskGBEnabledSrc = "auto_scaling_disk_gb_enabled"
    nComputeEnabledSrc = "auto_scaling_compute_enabled"
    nComputeScaleDownEnabledSrc = "auto_scaling_compute_scale_down_enabled"
    nComputeMinInstanceSizeSrc = "provider_auto_scaling_compute_min_instance_size"
    nComputeMaxInstanceSizeSrc = "provider_auto_scaling_compute_max_instance_size"
    nDiskGBEnabled = "disk_gb_enabled"
    nComputeEnabled = "compute_enabled"
    nComputeScaleDownEnabled = "compute_scale_down_enabled"
    nComputeMinInstanceSize = "compute_min_instance_size"
    nComputeMaxInstanceSize = "compute_max_instance_size"
    nNodeCount = "node_count"
    nElectableNodes = "electable_nodes"

    valClusterType = "REPLICASET"
    valPriority = 7

    errFreeCluster = "free cluster (because no " + nRepSpecs + ")"
    errRepSpecs = "setting " + nRepSpecs
)
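The free-tier branch (fillFreeTier) builds the same replication_specs/region_configs structure from the flat provider_* attributes when no replication_specs block exists. A rough sketch under that assumption, using an invented M0 tenant cluster rather than one of the PR's fixtures:

# before: free-tier cluster, no replication_specs block
resource "mongodbatlas_cluster" "free" {
  project_id                  = var.project_id
  name                        = "free-cluster"
  provider_name               = "TENANT"
  backing_provider_name       = "AWS"
  provider_region_name        = "US_EAST_1"
  provider_instance_size_name = "M0"
}

# after: cluster_type is forced to REPLICASET and a single region_configs entry with priority 7 is generated
resource "mongodbatlas_advanced_cluster" "free" {
  project_id   = var.project_id
  name         = "free-cluster"
  cluster_type = "REPLICASET"
  replication_specs = [{
    region_configs = [{
      priority              = 7
      region_name           = "US_EAST_1"
      provider_name         = "TENANT"
      backing_provider_name = "AWS"
      electable_specs = {
        instance_size = "M0"
      }
    }]
  }]
}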
@@ -0,0 +1,33 @@
resource "mongodbatlas_cluster" "autoscaling" { | ||
project_id = var.project_id | ||
name = var.cluster_name | ||
disk_size_gb = 100 | ||
num_shards = 1 | ||
cluster_type = "REPLICASET" | ||
|
||
replication_specs { | ||
num_shards = 1 | ||
regions_config { | ||
region_name = "US_WEST_2" | ||
electable_nodes = 3 | ||
priority = 7 | ||
read_only_nodes = 0 | ||
} | ||
} | ||
cloud_backup = true | ||
auto_scaling_disk_gb_enabled = true | ||
auto_scaling_compute_enabled = false | ||
auto_scaling_compute_scale_down_enabled = local.scale_down | ||
|
||
//Provider Settings "block" | ||
provider_name = "AWS" | ||
provider_auto_scaling_compute_min_instance_size = "M10" | ||
provider_auto_scaling_compute_max_instance_size = "M40" | ||
provider_instance_size_name = "M20" | ||
|
||
lifecycle { // To simulate if there a new instance size name to avoid scale cluster down to original value | ||
# Note that provider_instance_size_name won't exist in advanced_cluster so it's an error to refer to it, | ||
# but plugin doesn't help here. | ||
ignore_changes = [provider_instance_size_name] | ||
} | ||
} |
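For reference, roughly what the plugin would emit for the fixture above, hand-derived from the conversion rules in the Go diff. The golden output file is not shown here, so attribute order, formatting, and the handling of read_only_nodes are assumptions.

resource "mongodbatlas_advanced_cluster" "autoscaling" {
  project_id     = var.project_id
  name           = var.cluster_name
  cluster_type   = "REPLICASET"
  backup_enabled = true # moved from cloud_backup

  replication_specs = [{
    region_configs = [{
      provider_name = "AWS"
      region_name   = "US_WEST_2"
      priority      = 7
      auto_scaling = {
        disk_gb_enabled            = true
        compute_enabled            = false
        compute_min_instance_size  = "M10"
        compute_max_instance_size  = "M40"
        compute_scale_down_enabled = local.scale_down
      }
      electable_specs = {
        node_count    = 3
        instance_size = "M20"
        disk_size_gb  = 100
      }
      # read_only_nodes = 0 is dropped by the hunks shown above; actual handling may differ
    }]
  }]

  lifecycle { # left untouched; the stale provider_instance_size_name reference must be fixed by hand
    ignore_changes = [provider_instance_size_name]
  }

  # Generated by atlas-cli-plugin-terraform.
  # Please confirm that all references to this resource are updated.
}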