Skip to content

Commit a66d3a3

Browse files
fix: Adjust update operation to make use of Plan model over Config model (#3136)
* Adjust update operation to make use of Plan model over Config model * TEMPORARY: comment out mocked acceptance tests * comment out logic related to reducing unknown values in plan modifier for replication specs * Revert "TEMPORARY: comment out mocked acceptance tests" This reverts commit 4f2700c. * fix mocked acceptance tests * adjusting auto-scaling test to verify patch request does not include ignored attributes * Revert "comment out logic related to reducing unknown values in plan modifier for replication specs" This reverts commit 34427e6. * comment out code but leave all implementation code * Update internal/service/advancedcluster/resource_advanced_cluster_test.go Co-authored-by: Leo Antoli <[email protected]> --------- Co-authored-by: Leo Antoli <[email protected]>
1 parent 971500d commit a66d3a3

File tree

6 files changed

+66
-38
lines changed

6 files changed

+66
-38
lines changed

internal/service/advancedcluster/resource_advanced_cluster_test.go

Lines changed: 27 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -496,7 +496,6 @@ func TestAccClusterAdvancedCluster_defaultWrite(t *testing.T) {
496496
func TestAccClusterAdvancedClusterConfig_replicationSpecsAutoScaling(t *testing.T) {
497497
var (
498498
projectID, clusterName = acc.ProjectIDExecutionWithCluster(t, 4)
499-
clusterNameUpdated = acc.RandomClusterName()
500499
autoScaling = &admin.AdvancedAutoScalingSettings{
501500
Compute: &admin.AdvancedComputeAutoScaling{Enabled: conversion.Pointer(false), MaxInstanceSize: conversion.StringPtr("")},
502501
DiskGB: &admin.DiskGBAutoScaling{Enabled: conversion.Pointer(true)},
@@ -513,7 +512,7 @@ func TestAccClusterAdvancedClusterConfig_replicationSpecsAutoScaling(t *testing.
513512
CheckDestroy: acc.CheckDestroyCluster,
514513
Steps: []resource.TestStep{
515514
{
516-
Config: configReplicationSpecsAutoScaling(t, true, projectID, clusterName, autoScaling),
515+
Config: configReplicationSpecsAutoScaling(t, true, projectID, clusterName, autoScaling, "M10", 10),
517516
Check: resource.ComposeAggregateTestCheckFunc(
518517
acc.CheckExistsCluster(resourceName),
519518
acc.TestCheckResourceAttrPreviewProviderV2(true, resourceName, "name", clusterName),
@@ -523,12 +522,14 @@ func TestAccClusterAdvancedClusterConfig_replicationSpecsAutoScaling(t *testing.
523522
),
524523
},
525524
{
526-
Config: configReplicationSpecsAutoScaling(t, true, projectID, clusterNameUpdated, autoScalingUpdated),
525+
Config: configReplicationSpecsAutoScaling(t, true, projectID, clusterName, autoScalingUpdated, "M20", 20),
527526
Check: resource.ComposeAggregateTestCheckFunc(
528527
acc.CheckExistsCluster(resourceName),
529-
acc.TestCheckResourceAttrPreviewProviderV2(true, resourceName, "name", clusterNameUpdated),
528+
acc.TestCheckResourceAttrPreviewProviderV2(true, resourceName, "name", clusterName),
530529
acc.TestCheckResourceAttrSetPreviewProviderV2(true, resourceName, "replication_specs.0.region_configs.#"),
531530
acc.TestCheckResourceAttrPreviewProviderV2(true, resourceName, "replication_specs.0.region_configs.0.auto_scaling.0.compute_enabled", "true"),
531+
acc.TestCheckResourceAttrPreviewProviderV2(true, resourceName, "replication_specs.0.region_configs.0.electable_specs.0.instance_size", "M10"), // modified instance size in config is ignored
532+
acc.TestCheckResourceAttrPreviewProviderV2(true, resourceName, "replication_specs.0.region_configs.0.electable_specs.0.disk_size_gb", "10"), // modified disk size gb in config is ignored
532533
),
533534
},
534535
acc.TestStepImportCluster(resourceName),
@@ -2058,8 +2059,19 @@ func checkAdvancedDefaultWrite(isAcc bool, name, writeConcern, tls string) resou
20582059
pluralChecks...)
20592060
}
20602061

2061-
func configReplicationSpecsAutoScaling(t *testing.T, isAcc bool, projectID, clusterName string, p *admin.AdvancedAutoScalingSettings) string {
2062+
func configReplicationSpecsAutoScaling(t *testing.T, isAcc bool, projectID, clusterName string, p *admin.AdvancedAutoScalingSettings, elecInstanceSize string, elecDiskSizeGB int) string {
20622063
t.Helper()
2064+
lifecycleIgnoreChanges := ""
2065+
if p.Compute.GetEnabled() {
2066+
lifecycleIgnoreChanges = `
2067+
lifecycle {
2068+
ignore_changes = [
2069+
replication_specs[0].region_configs[0].electable_specs[0].instance_size,
2070+
replication_specs[0].region_configs[0].electable_specs[0].disk_size_gb
2071+
]
2072+
}`
2073+
}
2074+
20632075
return acc.ConvertAdvancedClusterToPreviewProviderV2(t, isAcc, fmt.Sprintf(`
20642076
resource "mongodbatlas_advanced_cluster" "test" {
20652077
project_id = %[1]q
@@ -2069,18 +2081,19 @@ func configReplicationSpecsAutoScaling(t *testing.T, isAcc bool, projectID, clus
20692081
replication_specs {
20702082
region_configs {
20712083
electable_specs {
2072-
instance_size = "M10"
2084+
instance_size = %[3]q
2085+
disk_size_gb = %[4]d
20732086
node_count = 3
20742087
}
20752088
analytics_specs {
20762089
instance_size = "M10"
20772090
node_count = 1
20782091
}
2079-
auto_scaling {
2080-
compute_enabled = %[3]t
2081-
disk_gb_enabled = %[4]t
2082-
compute_max_instance_size = %[5]q
2083-
}
2092+
auto_scaling {
2093+
compute_enabled = %[5]t
2094+
disk_gb_enabled = %[6]t
2095+
compute_max_instance_size = %[7]q
2096+
}
20842097
provider_name = "AWS"
20852098
priority = 7
20862099
region_name = "US_WEST_2"
@@ -2089,8 +2102,9 @@ func configReplicationSpecsAutoScaling(t *testing.T, isAcc bool, projectID, clus
20892102
advanced_configuration {
20902103
oplog_min_retention_hours = 5.5
20912104
}
2092-
}
2093-
`, projectID, clusterName, p.Compute.GetEnabled(), p.DiskGB.GetEnabled(), p.Compute.GetMaxInstanceSize()))
2105+
%[8]s
2106+
}
2107+
`, projectID, clusterName, elecInstanceSize, elecDiskSizeGB, p.Compute.GetEnabled(), p.DiskGB.GetEnabled(), p.Compute.GetMaxInstanceSize(), lifecycleIgnoreChanges))
20942108
}
20952109

20962110
func configReplicationSpecsAnalyticsAutoScaling(t *testing.T, isAcc bool, projectID, clusterName string, p *admin.AdvancedAutoScalingSettings) string {

internal/service/advancedcluster/testdata/TestAccMockableAdvancedCluster_shardedAddAnalyticsAndAutoScaling.yaml

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -199,7 +199,7 @@ steps:
199199
- path: /api/atlas/v2/groups/{groupId}/clusters/{clusterName}
200200
method: PATCH
201201
version: '2024-10-23'
202-
text: "{\n \"replicationSpecs\": [\n {\n \"regionConfigs\": [\n {\n \"analyticsSpecs\": {\n \"diskIOPS\": 2000,\n \"ebsVolumeType\": \"PROVISIONED\",\n \"instanceSize\": \"M30\",\n \"nodeCount\": 1\n },\n \"autoScaling\": {\n \"compute\": {},\n \"diskGB\": {\n \"enabled\": true\n }\n },\n \"electableSpecs\": {\n \"diskIOPS\": 2000,\n \"ebsVolumeType\": \"PROVISIONED\",\n \"instanceSize\": \"M30\",\n \"nodeCount\": 3\n },\n \"priority\": 7,\n \"providerName\": \"AWS\",\n \"regionName\": \"EU_WEST_1\"\n }\n ],\n \"zoneName\": \"ZoneName managed by Terraform\"\n },\n {\n \"regionConfigs\": [\n {\n \"analyticsSpecs\": {\n \"diskIOPS\": 1000,\n \"ebsVolumeType\": \"PROVISIONED\",\n \"instanceSize\": \"M30\",\n \"nodeCount\": 1\n },\n \"autoScaling\": {\n \"compute\": {},\n \"diskGB\": {\n \"enabled\": true\n }\n },\n \"electableSpecs\": {\n \"diskIOPS\": 1000,\n \"ebsVolumeType\": \"PROVISIONED\",\n \"instanceSize\": \"M30\",\n \"nodeCount\": 3\n },\n \"priority\": 7,\n \"providerName\": \"AWS\",\n \"regionName\": \"EU_WEST_1\"\n }\n ],\n \"zoneName\": \"ZoneName managed by Terraform\"\n }\n ]\n}"
202+
text: "{\n \"replicationSpecs\": [\n {\n \"regionConfigs\": [\n {\n \"analyticsSpecs\": {\n \"diskIOPS\": 2000,\n \"diskSizeGB\": 40,\n \"ebsVolumeType\": \"PROVISIONED\",\n \"instanceSize\": \"M30\",\n \"nodeCount\": 1\n },\n \"autoScaling\": {\n \"compute\": {},\n \"diskGB\": {\n \"enabled\": true\n }\n },\n \"electableSpecs\": {\n \"diskIOPS\": 2000,\n \"diskSizeGB\": 40,\n \"ebsVolumeType\": \"PROVISIONED\",\n \"instanceSize\": \"M30\",\n \"nodeCount\": 3\n },\n \"priority\": 7,\n \"providerName\": \"AWS\",\n \"regionName\": \"EU_WEST_1\"\n }\n ],\n \"zoneName\": \"ZoneName managed by Terraform\"\n },\n {\n \"regionConfigs\": [\n {\n \"analyticsSpecs\": {\n \"diskIOPS\": 1000,\n \"diskSizeGB\": 40,\n \"ebsVolumeType\": \"PROVISIONED\",\n \"instanceSize\": \"M30\",\n \"nodeCount\": 1\n },\n \"autoScaling\": {\n \"compute\": {},\n \"diskGB\": {\n \"enabled\": true\n }\n },\n \"electableSpecs\": {\n \"diskIOPS\": 1000,\n \"diskSizeGB\": 40,\n \"ebsVolumeType\": \"PROVISIONED\",\n \"instanceSize\": \"M30\",\n \"nodeCount\": 3\n },\n \"priority\": 7,\n \"providerName\": \"AWS\",\n \"regionName\": \"EU_WEST_1\"\n }\n ],\n \"zoneName\": \"ZoneName managed by Terraform\"\n }\n ]\n}"
203203
responses:
204204
- response_index: 63
205205
status: 200
@@ -263,7 +263,7 @@ steps:
263263
- path: /api/atlas/v2/groups/{groupId}/clusters/{clusterName}
264264
method: PATCH
265265
version: '2024-10-23'
266-
text: "{\n \"replicationSpecs\": [\n {\n \"regionConfigs\": [\n {\n \"analyticsSpecs\": {\n \"diskIOPS\": 2000,\n \"ebsVolumeType\": \"PROVISIONED\",\n \"instanceSize\": \"M30\",\n \"nodeCount\": 1\n },\n \"autoScaling\": {\n \"compute\": {},\n \"diskGB\": {\n \"enabled\": true\n }\n },\n \"electableSpecs\": {\n \"diskIOPS\": 2000,\n \"ebsVolumeType\": \"PROVISIONED\",\n \"instanceSize\": \"M30\",\n \"nodeCount\": 3\n },\n \"priority\": 7,\n \"providerName\": \"AWS\",\n \"regionName\": \"EU_WEST_1\"\n }\n ],\n \"zoneName\": \"ZoneName managed by Terraform\"\n },\n {\n \"regionConfigs\": [\n {\n \"analyticsSpecs\": {\n \"diskIOPS\": 1000,\n \"ebsVolumeType\": \"PROVISIONED\",\n \"instanceSize\": \"M30\",\n \"nodeCount\": 1\n },\n \"autoScaling\": {\n \"compute\": {},\n \"diskGB\": {\n \"enabled\": true\n }\n },\n \"electableSpecs\": {\n \"diskIOPS\": 1000,\n \"ebsVolumeType\": \"PROVISIONED\",\n \"instanceSize\": \"M30\",\n \"nodeCount\": 3\n },\n \"priority\": 7,\n \"providerName\": \"AWS\",\n \"regionName\": \"EU_WEST_1\"\n }\n ],\n \"zoneName\": \"ZoneName managed by Terraform\"\n }\n ]\n}"
266+
text: "{\n \"replicationSpecs\": [\n {\n \"regionConfigs\": [\n {\n \"analyticsSpecs\": {\n \"diskIOPS\": 2000,\n \"diskSizeGB\": 40,\n \"ebsVolumeType\": \"PROVISIONED\",\n \"instanceSize\": \"M30\",\n \"nodeCount\": 1\n },\n \"autoScaling\": {\n \"compute\": {},\n \"diskGB\": {\n \"enabled\": true\n }\n },\n \"electableSpecs\": {\n \"diskIOPS\": 2000,\n \"diskSizeGB\": 40,\n \"ebsVolumeType\": \"PROVISIONED\",\n \"instanceSize\": \"M30\",\n \"nodeCount\": 3\n },\n \"priority\": 7,\n \"providerName\": \"AWS\",\n \"regionName\": \"EU_WEST_1\"\n }\n ],\n \"zoneName\": \"ZoneName managed by Terraform\"\n },\n {\n \"regionConfigs\": [\n {\n \"analyticsSpecs\": {\n \"diskIOPS\": 1000,\n \"diskSizeGB\": 40,\n \"ebsVolumeType\": \"PROVISIONED\",\n \"instanceSize\": \"M30\",\n \"nodeCount\": 1\n },\n \"autoScaling\": {\n \"compute\": {},\n \"diskGB\": {\n \"enabled\": true\n }\n },\n \"electableSpecs\": {\n \"diskIOPS\": 1000,\n \"diskSizeGB\": 40,\n \"ebsVolumeType\": \"PROVISIONED\",\n \"instanceSize\": \"M30\",\n \"nodeCount\": 3\n },\n \"priority\": 7,\n \"providerName\": \"AWS\",\n \"regionName\": \"EU_WEST_1\"\n }\n ],\n \"zoneName\": \"ZoneName managed by Terraform\"\n }\n ]\n}"
267267
responses:
268268
- response_index: 63
269269
status: 200

internal/service/advancedcluster/testdata/TestAccMockableAdvancedCluster_shardedAddAnalyticsAndAutoScaling/02_01_PATCH__api_atlas_v2_groups_{groupId}_clusters_{clusterName}_2024-10-23.json

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,7 @@
55
{
66
"analyticsSpecs": {
77
"diskIOPS": 2000,
8+
"diskSizeGB": 40,
89
"ebsVolumeType": "PROVISIONED",
910
"instanceSize": "M30",
1011
"nodeCount": 1
@@ -17,6 +18,7 @@
1718
},
1819
"electableSpecs": {
1920
"diskIOPS": 2000,
21+
"diskSizeGB": 40,
2022
"ebsVolumeType": "PROVISIONED",
2123
"instanceSize": "M30",
2224
"nodeCount": 3
@@ -33,6 +35,7 @@
3335
{
3436
"analyticsSpecs": {
3537
"diskIOPS": 1000,
38+
"diskSizeGB": 40,
3639
"ebsVolumeType": "PROVISIONED",
3740
"instanceSize": "M30",
3841
"nodeCount": 1
@@ -45,6 +48,7 @@
4548
},
4649
"electableSpecs": {
4750
"diskIOPS": 1000,
51+
"diskSizeGB": 40,
4852
"ebsVolumeType": "PROVISIONED",
4953
"instanceSize": "M30",
5054
"nodeCount": 3

internal/service/advancedclustertpf/plan_modifier.go

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -52,14 +52,16 @@ func useStateForUnknowns(ctx context.Context, diags *diag.Diagnostics, state, pl
5252
attributeChanges := schemafunc.NewAttributeChanges(ctx, state, plan)
5353
keepUnknown := []string{"connection_strings", "state_name"} // Volatile attributes, should not be copied from state
5454
keepUnknown = append(keepUnknown, attributeChanges.KeepUnknown(attributeRootChangeMapping)...)
55-
keepUnknown = append(keepUnknown, determineKeepUnknownsAutoScaling(ctx, diags, state, plan)...)
55+
// pending revision if logic can be reincorporated safely: keepUnknown = append(keepUnknown, determineKeepUnknownsAutoScaling(ctx, diags, state, plan)...)
5656
schemafunc.CopyUnknowns(ctx, state, plan, keepUnknown, nil)
57+
/* pending revision if logic can be reincorporated safely:
5758
if slices.Contains(keepUnknown, "replication_specs") {
5859
useStateForUnknownsReplicationSpecs(ctx, diags, state, plan, &attributeChanges)
5960
}
61+
*/
6062
}
6163

62-
func useStateForUnknownsReplicationSpecs(ctx context.Context, diags *diag.Diagnostics, state, plan *TFModel, attrChanges *schemafunc.AttributeChanges) {
64+
func UseStateForUnknownsReplicationSpecs(ctx context.Context, diags *diag.Diagnostics, state, plan *TFModel, attrChanges *schemafunc.AttributeChanges) {
6365
stateRepSpecsTF := TFModelList[TFReplicationSpecsModel](ctx, diags, state.ReplicationSpecs)
6466
planRepSpecsTF := TFModelList[TFReplicationSpecsModel](ctx, diags, plan.ReplicationSpecs)
6567
if diags.HasError() {

internal/service/advancedclustertpf/resource.go

Lines changed: 19 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -222,37 +222,37 @@ func (r *rs) Read(ctx context.Context, req resource.ReadRequest, resp *resource.
222222
}
223223

224224
func (r *rs) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) {
225-
var state, configModel TFModel
225+
var state, plan TFModel
226226
diags := &resp.Diagnostics
227-
diags.Append(req.Config.Get(ctx, &configModel)...)
227+
diags.Append(req.Plan.Get(ctx, &plan)...)
228228
diags.Append(req.State.Get(ctx, &state)...)
229229
if diags.HasError() {
230230
return
231231
}
232-
waitParams := resolveClusterWaitParams(ctx, &configModel, diags, operationUpdate)
232+
waitParams := resolveClusterWaitParams(ctx, &plan, diags, operationUpdate)
233233
if diags.HasError() {
234234
return
235235
}
236236

237237
// FCV update is intentionally handled before any other cluster updates, and will wait for cluster to reach IDLE state before continuing
238-
clusterResp := r.applyPinnedFCVChanges(ctx, diags, &state, &configModel, waitParams)
238+
clusterResp := r.applyPinnedFCVChanges(ctx, diags, &state, &plan, waitParams)
239239
if diags.HasError() {
240240
return
241241
}
242242

243243
{
244-
diff := findClusterDiff(ctx, &state, &configModel, diags)
244+
diff := findClusterDiff(ctx, &state, &plan, diags)
245245
if diags.HasError() {
246246
return
247247
}
248248
switch {
249249
case diff.isUpgradeTenantToFlex:
250-
if flexOut := handleFlexUpgrade(ctx, diags, r.Client, waitParams, &configModel); flexOut != nil {
250+
if flexOut := handleFlexUpgrade(ctx, diags, r.Client, waitParams, &plan); flexOut != nil {
251251
diags.Append(resp.State.Set(ctx, flexOut)...)
252252
}
253253
return
254254
case diff.isUpdateOfFlex:
255-
if flexOut := handleFlexUpdate(ctx, diags, r.Client, &configModel); flexOut != nil {
255+
if flexOut := handleFlexUpdate(ctx, diags, r.Client, &plan); flexOut != nil {
256256
diags.Append(resp.State.Set(ctx, flexOut)...)
257257
}
258258
return
@@ -261,14 +261,14 @@ func (r *rs) Update(ctx context.Context, req resource.UpdateRequest, resp *resou
261261
case diff.isUpgradeTenant():
262262
clusterResp = UpgradeTenant(ctx, diags, r.Client, waitParams, diff.upgradeTenantReq)
263263
case diff.isClusterPatchOnly():
264-
clusterResp = r.applyClusterChanges(ctx, diags, &state, &configModel, diff.clusterPatchOnlyReq, waitParams)
264+
clusterResp = r.applyClusterChanges(ctx, diags, &state, &plan, diff.clusterPatchOnlyReq, waitParams)
265265
}
266266
if diags.HasError() {
267267
return
268268
}
269269
}
270-
patchReqProcessArgs := update.PatchPayloadTpf(ctx, diags, &state.AdvancedConfiguration, &configModel.AdvancedConfiguration, NewAtlasReqAdvancedConfiguration)
271-
patchReqProcessArgsLegacy := update.PatchPayloadTpf(ctx, diags, &state.AdvancedConfiguration, &configModel.AdvancedConfiguration, NewAtlasReqAdvancedConfigurationLegacy)
270+
patchReqProcessArgs := update.PatchPayloadTpf(ctx, diags, &state.AdvancedConfiguration, &plan.AdvancedConfiguration, NewAtlasReqAdvancedConfiguration)
271+
patchReqProcessArgsLegacy := update.PatchPayloadTpf(ctx, diags, &state.AdvancedConfiguration, &plan.AdvancedConfiguration, NewAtlasReqAdvancedConfigurationLegacy)
272272
if diags.HasError() {
273273
return
274274
}
@@ -279,9 +279,9 @@ func (r *rs) Update(ctx context.Context, req resource.UpdateRequest, resp *resou
279279
var modelOut *TFModel
280280
if clusterResp == nil { // no Atlas updates needed but override is still needed (e.g. tags going from nil to [] or vice versa)
281281
modelOut = &state
282-
overrideAttributesWithPrevStateValue(&configModel, modelOut)
282+
overrideAttributesWithPrevStateValue(&plan, modelOut)
283283
} else {
284-
modelOut, _ = getBasicClusterModelResource(ctx, diags, r.Client, clusterResp, &configModel)
284+
modelOut, _ = getBasicClusterModelResource(ctx, diags, r.Client, clusterResp, &plan)
285285
if diags.HasError() {
286286
return
287287
}
@@ -561,31 +561,31 @@ func findClusterDiff(ctx context.Context, state, plan *TFModel, diags *diag.Diag
561561
return clusterDiff{clusterPatchOnlyReq: patchReq}
562562
}
563563

564-
func handleFlexUpgrade(ctx context.Context, diags *diag.Diagnostics, client *config.MongoDBClient, waitParams *ClusterWaitParams, configModel *TFModel) *TFModel {
565-
configReq := normalizeFromTFModel(ctx, configModel, diags, false)
564+
func handleFlexUpgrade(ctx context.Context, diags *diag.Diagnostics, client *config.MongoDBClient, waitParams *ClusterWaitParams, plan *TFModel) *TFModel {
565+
configReq := normalizeFromTFModel(ctx, plan, diags, false)
566566
if diags.HasError() {
567567
return nil
568568
}
569569
flexCluster := FlexUpgrade(ctx, diags, client, waitParams, GetUpgradeToFlexClusterRequest(configReq))
570570
if diags.HasError() {
571571
return nil
572572
}
573-
return NewTFModelFlexResource(ctx, diags, flexCluster, GetPriorityOfFlexReplicationSpecs(configReq.ReplicationSpecs), configModel)
573+
return NewTFModelFlexResource(ctx, diags, flexCluster, GetPriorityOfFlexReplicationSpecs(configReq.ReplicationSpecs), plan)
574574
}
575575

576-
func handleFlexUpdate(ctx context.Context, diags *diag.Diagnostics, client *config.MongoDBClient, configModel *TFModel) *TFModel {
577-
configReq := normalizeFromTFModel(ctx, configModel, diags, false)
576+
func handleFlexUpdate(ctx context.Context, diags *diag.Diagnostics, client *config.MongoDBClient, plan *TFModel) *TFModel {
577+
configReq := normalizeFromTFModel(ctx, plan, diags, false)
578578
if diags.HasError() {
579579
return nil
580580
}
581-
flexCluster, err := flexcluster.UpdateFlexCluster(ctx, configModel.ProjectID.ValueString(), configModel.Name.ValueString(),
581+
flexCluster, err := flexcluster.UpdateFlexCluster(ctx, plan.ProjectID.ValueString(), plan.Name.ValueString(),
582582
GetFlexClusterUpdateRequest(configReq.Tags, configReq.TerminationProtectionEnabled),
583583
client.AtlasV2.FlexClustersApi)
584584
if err != nil {
585585
diags.AddError(flexcluster.ErrorUpdateFlex, err.Error())
586586
return nil
587587
}
588-
return NewTFModelFlexResource(ctx, diags, flexCluster, GetPriorityOfFlexReplicationSpecs(configReq.ReplicationSpecs), configModel)
588+
return NewTFModelFlexResource(ctx, diags, flexCluster, GetPriorityOfFlexReplicationSpecs(configReq.ReplicationSpecs), plan)
589589
}
590590

591591
func isShardingConfigUpgrade(ctx context.Context, state, plan *TFModel, diags *diag.Diagnostics) bool {

internal/testutil/acc/advanced_cluster_preview_provider_v2.go

Lines changed: 10 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -93,6 +93,13 @@ func AttrNameToPreviewProviderV2(isAcc bool, name string) string {
9393
return name
9494
}
9595

96+
func attrReferenceToPreviewProviderV2(name string) string {
97+
for _, singleAttrName := range tpfSingleNestedAttrs {
98+
name = strings.ReplaceAll(name, singleAttrName+"[0]", singleAttrName)
99+
}
100+
return name
101+
}
102+
96103
func ConvertAdvancedClusterToPreviewProviderV2(t *testing.T, isAcc bool, def string) string {
97104
t.Helper()
98105
if skipPreviewProviderV2Work(isAcc) {
@@ -114,8 +121,9 @@ func ConvertAdvancedClusterToPreviewProviderV2(t *testing.T, isAcc bool, def str
114121
convertKeyValueAttrs(t, "labels", writeBody)
115122
convertKeyValueAttrs(t, "tags", writeBody)
116123
}
117-
content := parse.Bytes()
118-
return string(content)
124+
result := string(parse.Bytes())
125+
result = attrReferenceToPreviewProviderV2(result) // useful for lifecycle ignore definitions
126+
return result
119127
}
120128

121129
func skipPreviewProviderV2Work(isAcc bool) bool {

0 commit comments

Comments
 (0)