Merged
45 changes: 35 additions & 10 deletions internal/convert/adv2v2.go
@@ -40,7 +40,8 @@ func updateResource(resource *hclwrite.Block) (bool, error) {
 	if hasExpectedBlocksAsAttributes(resourceb) {
 		return false, nil
 	}
-	if err := convertRepSpecs(resourceb); err != nil {
+	diskSizeGB, _ := hcl.PopAttr(resourceb, nDiskSizeGB, errRoot) // ok to fail as it's optional
+	if err := convertRepSpecs(resourceb, diskSizeGB); err != nil {
 		return false, err
 	}
 	if err := fillTagsLabelsOpt(resourceb, nTags); err != nil {
@@ -56,18 +57,29 @@ func updateResource(resource *hclwrite.Block) (bool, error) {
 	return true, nil
 }
 
-func convertRepSpecs(resourceb *hclwrite.Body) error {
+func convertRepSpecs(resourceb *hclwrite.Body, diskSizeGB hclwrite.Tokens) error {
 	var repSpecs []*hclwrite.Body
 	for {
 		block := resourceb.FirstMatchingBlock(nRepSpecs, nil)
 		if block == nil {
 			break
 		}
 		resourceb.RemoveBlock(block)
-		if err := convertConfig(block.Body()); err != nil {
+		blockb := block.Body()
+		numShardsVal := 1 // default to 1 if num_shards not present
+		if numShardsAttr := blockb.GetAttribute(nNumShards); numShardsAttr != nil {
+			var err error
+			if numShardsVal, err = hcl.GetAttrInt(numShardsAttr, errNumShards); err != nil {
+				return err
+			}
+			blockb.RemoveAttribute(nNumShards)
+		}
+		if err := convertConfig(blockb, diskSizeGB); err != nil {
 			return err
 		}
-		repSpecs = append(repSpecs, block.Body())
+		for range numShardsVal {
+			repSpecs = append(repSpecs, blockb)
+		}
 	}
 	if len(repSpecs) == 0 {
 		return fmt.Errorf("must have at least one replication_specs")
@@ -76,7 +88,7 @@ func convertRepSpecs(resourceb *hclwrite.Body) error {
 	return nil
 }
 
-func convertConfig(repSpecs *hclwrite.Body) error {
+func convertConfig(repSpecs *hclwrite.Body, diskSizeGB hclwrite.Tokens) error {
 	var configs []*hclwrite.Body
 	for {
 		block := repSpecs.FirstMatchingBlock(nConfig, nil)
@@ -85,11 +97,11 @@ func convertConfig(repSpecs *hclwrite.Body) error {
 		}
 		repSpecs.RemoveBlock(block)
 		blockb := block.Body()
-		fillBlockOpt(blockb, nElectableSpecs)
-		fillBlockOpt(blockb, nReadOnlySpecs)
-		fillBlockOpt(blockb, nAnalyticsSpecs)
-		fillBlockOpt(blockb, nAutoScaling)
-		fillBlockOpt(blockb, nAnalyticsAutoScaling)
+		fillSpecOpt(blockb, nElectableSpecs, diskSizeGB)
+		fillSpecOpt(blockb, nReadOnlySpecs, diskSizeGB)
+		fillSpecOpt(blockb, nAnalyticsSpecs, diskSizeGB)
+		fillSpecOpt(blockb, nAutoScaling, nil)          // auto_scaling doesn't need disk_size_gb
+		fillSpecOpt(blockb, nAnalyticsAutoScaling, nil) // analytics_auto_scaling doesn't need disk_size_gb
 		configs = append(configs, blockb)
 	}
 	if len(configs) == 0 {
@@ -99,6 +111,19 @@ func convertConfig(repSpecs *hclwrite.Body) error {
 	return nil
 }
 
+func fillSpecOpt(resourceb *hclwrite.Body, name string, diskSizeGBTokens hclwrite.Tokens) {
+	block := resourceb.FirstMatchingBlock(name, nil)
+	if block == nil {
+		return
+	}
+	if diskSizeGBTokens != nil {
+		blockb := block.Body()
+		blockb.RemoveAttribute(nDiskSizeGB)
+		blockb.SetAttributeRaw(nDiskSizeGB, diskSizeGBTokens)
+	}
+	fillBlockOpt(resourceb, name)
+}
+
 // hasExpectedBlocksAsAttributes checks if any of the expected block names
 // exist as attributes in the resource body. In that case conversion is not done
 // as advanced cluster is not in a valid SDKv2 configuration.
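For reference, here is a minimal standalone sketch of the conversion semantics this diff adds: expanding `num_shards` into repeated `replication_specs` entries and pushing the root `disk_size_gb` down into the `*_specs` blocks. It is illustrative only, not code from this PR; it uses just the upstream `hclwrite` API and inlines rough equivalents of the repo's internal `hcl.PopAttr` / `hcl.GetAttrInt` helpers, whose exact behavior is assumed.

```go
// Standalone sketch (assumed/simplified, not the plugin's actual code).
package main

import (
	"fmt"
	"strconv"
	"strings"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/hclwrite"
)

func main() {
	src := `resource "mongodbatlas_advanced_cluster" "clu" {
  disk_size_gb = 100
  replication_specs {
    num_shards = 2
    region_configs {
      electable_specs {
        instance_size = "M10"
      }
    }
  }
}`
	f, diags := hclwrite.ParseConfig([]byte(src), "main.tf", hcl.InitialPos)
	if diags.HasErrors() {
		panic(diags)
	}
	resourceb := f.Body().Blocks()[0].Body()

	// "Pop" the root disk_size_gb, keeping its raw tokens so that both
	// literals (100) and expressions (var.disk_size_gb) survive verbatim.
	var diskSizeGB hclwrite.Tokens
	if attr := resourceb.GetAttribute("disk_size_gb"); attr != nil {
		diskSizeGB = attr.Expr().BuildTokens(nil)
		resourceb.RemoveAttribute("disk_size_gb")
	}

	repSpec := resourceb.FirstMatchingBlock("replication_specs", nil)
	repSpecb := repSpec.Body()

	// Read and drop num_shards. Only numeric literals evaluate here, which is
	// what the new "setting num_shards: failed to evaluate number" error guards.
	numShards := 1 // default when num_shards is absent
	if attr := repSpecb.GetAttribute("num_shards"); attr != nil {
		raw := strings.TrimSpace(string(attr.Expr().BuildTokens(nil).Bytes()))
		n, err := strconv.Atoi(raw)
		if err != nil {
			panic(fmt.Errorf("setting num_shards: failed to evaluate number"))
		}
		numShards = n
		repSpecb.RemoveAttribute("num_shards")
	}

	// Push the root disk_size_gb into electable_specs, overriding any inner
	// value, as the clu_var test case documents.
	specb := repSpecb.FirstMatchingBlock("region_configs", nil).Body().
		FirstMatchingBlock("electable_specs", nil).Body()
	if diskSizeGB != nil {
		specb.RemoveAttribute("disk_size_gb")
		specb.SetAttributeRaw("disk_size_gb", diskSizeGB)
	}

	// One replication_specs entry per shard: the same body is reused, like
	// the `for range numShardsVal` loop in convertRepSpecs.
	var repSpecs []*hclwrite.Body
	for i := 0; i < numShards; i++ {
		repSpecs = append(repSpecs, repSpecb)
	}
	fmt.Printf("replication_specs entries: %d\n", len(repSpecs)) // 2
	fmt.Println(string(repSpecs[0].BuildTokens(nil).Bytes()))
}
```

Reusing the same `*hclwrite.Body` pointer for every shard works because the bodies are only read when tokens are rendered, which is presumably why `convertRepSpecs` can append `blockb` `num_shards` times.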
1 change: 1 addition & 0 deletions internal/convert/const.go
@@ -14,6 +14,7 @@ const (
 	errRepSpecs  = "setting " + nRepSpecs
 	errPriority  = "setting " + nPriority
 	errNumShards = "setting " + nNumShards
+	errRoot      = "setting root attributes"
 
 	commentGeneratedBy       = "Generated by atlas-cli-plugin-terraform."
 	commentConfirmReferences = "Please review the changes and confirm that references to this resource are updated."
113 changes: 113 additions & 0 deletions internal/convert/testdata/adv2v2/disk_size_gb.in.tf
@@ -0,0 +1,113 @@
resource "mongodbatlas_advanced_cluster" "clu" {
project_id = var.project_id
name = "clu"
cluster_type = "SHARDED"
disk_size_gb = 100
replication_specs {
region_configs {
priority = 7
provider_name = "AWS"
region_name = "US_EAST_1"
electable_specs {
instance_size = "M10"
node_count = 2
}
}
region_configs {
priority = 6
provider_name = "AWS"
region_name = "US_WEST_2"
electable_specs {
instance_size = "M10"
node_count = 1
}
}
}
}

resource "mongodbatlas_advanced_cluster" "clu_var" {
project_id = var.project_id
name = "clu"
cluster_type = "SHARDED"
disk_size_gb = var.disk_size_gb
replication_specs {
region_configs {
priority = 7
provider_name = "AWS"
region_name = "US_EAST_1"
electable_specs {
instance_size = "M10"
node_count = 2
}
}
region_configs {
priority = 6
provider_name = "AWS"
region_name = "US_WEST_2"
electable_specs {
disk_size_gb = 123 # will be ignored and root value will be used instead
instance_size = "M10"
node_count = 1
}
}
}
}

resource "mongodbatlas_advanced_cluster" "clu_keep" {
project_id = var.project_id
name = "clu"
cluster_type = "SHARDED"
replication_specs {
region_configs {
priority = 7
provider_name = "AWS"
region_name = "US_EAST_1"
electable_specs {
instance_size = "M10"
node_count = 2
}
}
region_configs {
priority = 6
provider_name = "AWS"
region_name = "US_WEST_2"
electable_specs {
disk_size_gb = 123 # will be kept as root value is not defined
instance_size = "M10"
node_count = 1
}
}
}
}

resource "mongodbatlas_advanced_cluster" "auto" {
project_id = var.project_id
name = "clu"
cluster_type = "SHARDED"
disk_size_gb = 100
replication_specs {
region_configs {
priority = 7
provider_name = "AWS"
region_name = "US_EAST_1"
electable_specs {
instance_size = "M10"
node_count = 2
}
read_only_specs {
instance_size = "M10"
node_count = 1
}
analytics_specs {
instance_size = "M10"
node_count = 1
}
auto_scaling {
disk_gb_enabled = true # auto_scaling won't get disk_size_gb
}
analytics_auto_scaling {
compute_enabled = true # analytics_auto_scaling won't get disk_size_gb
}
}
}
}
140 changes: 140 additions & 0 deletions internal/convert/testdata/adv2v2/disk_size_gb.out.tf
@@ -0,0 +1,140 @@
resource "mongodbatlas_advanced_cluster" "clu" {
project_id = var.project_id
name = "clu"
cluster_type = "SHARDED"
replication_specs = [
{
region_configs = [
{
priority = 7
provider_name = "AWS"
region_name = "US_EAST_1"
electable_specs = {
instance_size = "M10"
node_count = 2
disk_size_gb = 100
}
},
{
priority = 6
provider_name = "AWS"
region_name = "US_WEST_2"
electable_specs = {
instance_size = "M10"
node_count = 1
disk_size_gb = 100
}
}
]
}
]

# Updated by atlas-cli-plugin-terraform, please review the changes.
}

resource "mongodbatlas_advanced_cluster" "clu_var" {
project_id = var.project_id
name = "clu"
cluster_type = "SHARDED"
replication_specs = [
{
region_configs = [
{
priority = 7
provider_name = "AWS"
region_name = "US_EAST_1"
electable_specs = {
instance_size = "M10"
node_count = 2
disk_size_gb = var.disk_size_gb
}
},
{
priority = 6
provider_name = "AWS"
region_name = "US_WEST_2"
electable_specs = {
instance_size = "M10"
node_count = 1
disk_size_gb = var.disk_size_gb
}
}
]
}
]

# Updated by atlas-cli-plugin-terraform, please review the changes.
}

resource "mongodbatlas_advanced_cluster" "clu_keep" {
project_id = var.project_id
name = "clu"
cluster_type = "SHARDED"
replication_specs = [
{
region_configs = [
{
priority = 7
provider_name = "AWS"
region_name = "US_EAST_1"
electable_specs = {
instance_size = "M10"
node_count = 2
}
},
{
priority = 6
provider_name = "AWS"
region_name = "US_WEST_2"
electable_specs = {
disk_size_gb = 123 # will be kept as root value is not defined
instance_size = "M10"
node_count = 1
}
}
]
}
]

# Updated by atlas-cli-plugin-terraform, please review the changes.
}

resource "mongodbatlas_advanced_cluster" "auto" {
project_id = var.project_id
name = "clu"
cluster_type = "SHARDED"
replication_specs = [
{
region_configs = [
{
priority = 7
provider_name = "AWS"
region_name = "US_EAST_1"
electable_specs = {
instance_size = "M10"
node_count = 2
disk_size_gb = 100
}
read_only_specs = {
instance_size = "M10"
node_count = 1
disk_size_gb = 100
}
analytics_specs = {
instance_size = "M10"
node_count = 1
disk_size_gb = 100
}
auto_scaling = {
disk_gb_enabled = true # auto_scaling won't get disk_size_gb
}
analytics_auto_scaling = {
compute_enabled = true # analytics_auto_scaling won't get disk_size_gb
}
}
]
}
]

# Updated by atlas-cli-plugin-terraform, please review the changes.
}
3 changes: 2 additions & 1 deletion internal/convert/testdata/adv2v2/errors.json
@@ -1,5 +1,6 @@
 {
   "configuration_file_error": "failed to parse Terraform config file",
   "replication_specs_missing_region_configs": "replication_specs must have at least one region_configs",
-  "missing_replication_specs": "must have at least one replication_specs"
+  "missing_replication_specs": "must have at least one replication_specs",
+  "num_shards_not_numerical": "setting num_shards: failed to evaluate number"
 }
Review thread on the new num_shards error:

Contributor:
I'm not sure the full context of this error, but not sure how I would fix it 😅

Collaborator (author):
as in the other command we distinguish two main use cases:

  • advanced_cluster used directly without a module, we expect a numerical value in num_shards
  • advanced_cluster in a module, in this case we support variables in num_shards and dynamic blocks (PR will follow with this implementation).

see PR description note for more info

Contributor:
Thank you for the explanation!
FFTR or leave open for others
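To make the distinction in the thread above concrete, here is a tiny hypothetical demo of when the new errors.json entry fires: a numeric literal num_shards evaluates to an int and drives the expansion, while a variable reference cannot be evaluated and surfaces the error (module/variable support is deferred to the follow-up PR mentioned above).

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	// Hypothetical raw expression texts as they would appear in the HCL source.
	for _, raw := range []string{"3", "var.num_shards"} {
		if n, err := strconv.Atoi(raw); err == nil {
			fmt.Printf("%-16q -> expand into %d replication_specs entries\n", raw, n)
		} else {
			fmt.Printf("%-16q -> setting num_shards: failed to evaluate number\n", raw)
		}
	}
}
```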