Skip to content

Commit 433c0ad

Browse files
authored
chore: Handles replication specs updates as in SDKv2 and enables most of remaining SDKv2 Acc Tests (#2895)
* chore: Refactor fail_index_key_too_long to be compatible with old implementation and use zero values in advanced_configuration * chore: fix formatting * fix: nil pointer error * chore: Refactor fail_index_key_too_long to be compatible with old implementation and use zero values in advanced_configuration * chore: fix formatting * fix: nil pointer error * chore: Fixes converting to TPF by including all attributes in replication_specs * fix: Prevent increasing num_shards from 1 when state doesn't use it * refactor: Normalizing state can lead to replication specs updates when nodeCount=0 in an electable spec, therefore, not safe * refactor: Normalizing state can lead to replication specs updates when nodeCount=0 in an electable spec, therefore, not safe * fix: Update EbsVolumeType conversion to handle nil or empty string cases * test: Support updating data files with updated diff requests * chore: regenerate mock data * refactor: Enable more tests in TPF * refactor: Enhance patch operations to support suffix and prefix ignore/include options * refactor: Add patch options to ignore and include specific state fields * chore: Re-generate diff payloads * refactor: use existing variable when no variable exists in config, probably there will be an import error if `{groupId}` is used as projectId * refactor: Enable TestAccClusterAdvancedCluster_singleShardedMultiCloud test case * test: Enable another test * refactor: Add AllowOutOfOrder option to MockHTTPDataConfig and update MockRoundTripper behavior * test: Enable more tests for TPF * fix lint error * chore: Update failIndexKeyTooLong retrieval to use getter method * test: Skip tests for Advanced Cluster V2 schema due to unimplemented features * refactor: Replace string checks with strconv.ParseBool for environment variable validation * refactor: Simplify variable declarations in newAttrPatchOperations function * Use empty string when legacyID is not set * add workaround for TPF data source is not fully implemented yet * 
remove err check in ParseBool * fix: num_shards updates detection * fix: handle nil patch in tenant upgrade request * feat: add forceUpdateAttr option to force update of replication specs when num_shards have changed * revert earlier num_shards fix * chore: remove debugging code [ci skip] * fix: update patch options to use ForceUpdateAttr for replicationSpecs * Revert "add workaround for TPF data source is not fully implemented yet" This reverts commit 6c2516c. * remove: todo for `id` * feat: Support isSchemaUpgrade to handle legacy to new schema * test: Skip TestAccClusterAdvancedClusterConfig_asymmetricGeoShardedNewSchemaAddingRemovingShard until full replication_specs support is added
1 parent 759363a commit 433c0ad

20 files changed

+247
-191
lines changed

internal/common/conversion/error_framework.go

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,8 @@ import (
44
"encoding/json"
55

66
"github.com/hashicorp/terraform-plugin-framework/diag"
7+
8+
legacyDiag "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
79
)
810

911
type ErrBody interface {
@@ -32,3 +34,13 @@ func AddJSONBodyErrorToDiagnostics(msgPrefix string, err error, diags *diag.Diag
3234
errorJSON := string(errorBytes)
3335
diags.AddError(msgPrefix, errorJSON)
3436
}
37+
38+
func AddLegacyDiags(diags *diag.Diagnostics, legacyDiags legacyDiag.Diagnostics) {
39+
for _, diag := range legacyDiags {
40+
if diag.Severity == legacyDiag.Error {
41+
diags.AddError(diag.Summary, diag.Detail)
42+
} else {
43+
diags.AddWarning(diag.Summary, diag.Detail)
44+
}
45+
}
46+
}

internal/common/update/patch_payload.go

Lines changed: 39 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -12,28 +12,55 @@ import (
1212
)
1313

1414
type attrPatchOperations struct {
15-
data map[string][]jsondiff.Operation
16-
ignoreInState []string
15+
data map[string][]jsondiff.Operation
16+
ignoreInStateSuffix []string
17+
ignoreInStatePrefix []string
18+
includeInStateSuffix []string
19+
forceUpdateAttr []string
1720
}
1821

1922
func (m *attrPatchOperations) ignoreInStatePath(path string) bool {
20-
for _, ignore := range m.ignoreInState {
23+
for _, include := range m.includeInStateSuffix {
24+
suffix := "/" + include
25+
if strings.HasSuffix(path, suffix) {
26+
return false
27+
}
28+
}
29+
for _, ignore := range m.ignoreInStateSuffix {
2130
suffix := "/" + ignore
2231
if strings.HasSuffix(path, suffix) {
2332
return true
2433
}
2534
}
35+
for _, ignore := range m.ignoreInStatePrefix {
36+
for _, part := range strings.Split(path, "/") {
37+
if ignore == part {
38+
return true
39+
}
40+
}
41+
}
2642
return false
2743
}
2844

2945
func newAttrPatchOperations(patch jsondiff.Patch, options []PatchOptions) *attrPatchOperations {
30-
ignoreInState := []string{}
46+
var (
47+
ignoreSuffixInState []string
48+
ignorePrefixInState []string
49+
includeSuffixInState []string
50+
forceUpdateAttr []string
51+
)
3152
for _, option := range options {
32-
ignoreInState = append(ignoreInState, option.IgnoreInState...)
53+
ignoreSuffixInState = append(ignoreSuffixInState, option.IgnoreInStateSuffix...)
54+
ignorePrefixInState = append(ignorePrefixInState, option.IgnoreInStatePrefix...)
55+
includeSuffixInState = append(includeSuffixInState, option.IncludeInStateSuffix...)
56+
forceUpdateAttr = append(forceUpdateAttr, option.ForceUpdateAttr...)
3357
}
3458
self := &attrPatchOperations{
35-
data: map[string][]jsondiff.Operation{},
36-
ignoreInState: ignoreInState,
59+
data: map[string][]jsondiff.Operation{},
60+
ignoreInStateSuffix: ignoreSuffixInState,
61+
ignoreInStatePrefix: ignorePrefixInState,
62+
includeInStateSuffix: includeSuffixInState,
63+
forceUpdateAttr: forceUpdateAttr,
3764
}
3865
for _, op := range patch {
3966
if op.Path == "" {
@@ -81,7 +108,7 @@ func (m *attrPatchOperations) hasChanged(attr string) bool {
81108
func (m *attrPatchOperations) ChangedAttributes() []string {
82109
attrs := []string{}
83110
for attr := range m.data {
84-
if m.hasChanged(attr) {
111+
if m.hasChanged(attr) || slices.Contains(m.forceUpdateAttr, attr) {
85112
attrs = append(attrs, attr)
86113
}
87114
}
@@ -136,7 +163,10 @@ func convertJSONDiffToJSONPatch(patch jsondiff.Patch) (jsonpatch.Patch, error) {
136163

137164
// Current limitation if the field is set as part of a nested attribute in a map
138165
type PatchOptions struct {
139-
IgnoreInState []string
166+
IgnoreInStateSuffix []string
167+
IgnoreInStatePrefix []string
168+
IncludeInStateSuffix []string
169+
ForceUpdateAttr []string
140170
}
141171

142172
// PatchPayload uses the state and plan to changes to find the patch request, including changes only when:

internal/common/update/patch_payload_test.go

Lines changed: 38 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -22,17 +22,18 @@ func TestPatchReplicationSpecs(t *testing.T) {
2222
replicationSpec2ZoneName = "replicationSpec2_zoneName"
2323
rootName = "my-cluster"
2424
rootNameUpdated = "my-cluster-updated"
25-
state = admin.ClusterDescription20240805{
26-
Id: &idGlobal,
27-
Name: &rootName,
28-
ReplicationSpecs: &[]admin.ReplicationSpec20240805{
29-
{
30-
Id: &idReplicationSpec1,
31-
ZoneId: &replicationSpec1ZoneID,
32-
ZoneName: &replicationSpec1ZoneNameOld,
33-
},
25+
stateReplicationSpecs = []admin.ReplicationSpec20240805{
26+
{
27+
Id: &idReplicationSpec1,
28+
ZoneId: &replicationSpec1ZoneID,
29+
ZoneName: &replicationSpec1ZoneNameOld,
3430
},
3531
}
32+
state = admin.ClusterDescription20240805{
33+
Id: &idGlobal,
34+
Name: &rootName,
35+
ReplicationSpecs: &stateReplicationSpecs,
36+
}
3637
planOptionalUpdated = admin.ClusterDescription20240805{
3738
Name: &rootName,
3839
ReplicationSpecs: &[]admin.ReplicationSpec20240805{
@@ -188,6 +189,16 @@ func TestPatchReplicationSpecs(t *testing.T) {
188189
plan: &planNoChanges,
189190
patchExpected: nil,
190191
},
192+
"Forced changes when forceUpdateAttr set": {
193+
state: &state,
194+
plan: &planNoChanges,
195+
patchExpected: &admin.ClusterDescription20240805{
196+
ReplicationSpecs: &stateReplicationSpecs,
197+
},
198+
options: []update.PatchOptions{
199+
{ForceUpdateAttr: []string{"replicationSpecs"}},
200+
},
201+
},
191202
"Empty array should return no changes": {
192203
state: &admin.ClusterDescription20240805{
193204
Labels: &[]admin.ComponentLabel{},
@@ -198,12 +209,23 @@ func TestPatchReplicationSpecs(t *testing.T) {
198209
patchExpected: nil,
199210
},
200211
"diskSizeGb ignored in state": {
201-
state: clusterDescriptionDiskSizeNodeCount(50.0, 3, conversion.Pointer(50.0), 0),
202-
plan: clusterDescriptionDiskSizeNodeCount(55.0, 3, nil, 0),
203-
patchExpected: clusterDescriptionDiskSizeNodeCount(55.0, 3, nil, 0),
212+
state: clusterDescriptionDiskSizeNodeCount(50.0, 3, conversion.Pointer(50.0), 0, conversion.Pointer(3500)),
213+
plan: clusterDescriptionDiskSizeNodeCount(55.0, 3, nil, 0, nil),
214+
patchExpected: clusterDescriptionDiskSizeNodeCount(55.0, 3, nil, 0, conversion.Pointer(3500)),
215+
options: []update.PatchOptions{
216+
{
217+
IgnoreInStateSuffix: []string{"diskSizeGB"},
218+
},
219+
},
220+
},
221+
"regionConfigs ignored in state but diskIOPS included": {
222+
state: clusterDescriptionDiskSizeNodeCount(50.0, 3, conversion.Pointer(50.0), 0, conversion.Pointer(3500)),
223+
plan: clusterDescriptionDiskSizeNodeCount(55.0, 3, nil, 0, nil),
224+
patchExpected: clusterDescriptionDiskSizeNodeCount(55.0, 3, nil, 0, conversion.Pointer(3500)),
204225
options: []update.PatchOptions{
205226
{
206-
IgnoreInState: []string{"diskSizeGB"},
227+
IgnoreInStatePrefix: []string{"regionConfigs"},
228+
IncludeInStateSuffix: []string{"diskIOPS"},
207229
},
208230
},
209231
},
@@ -280,7 +302,7 @@ func TestIsEmpty(t *testing.T) {
280302
assert.False(t, update.IsZeroValues(&admin.ClusterDescription20240805{Name: conversion.Pointer("my-cluster")}))
281303
}
282304

283-
func clusterDescriptionDiskSizeNodeCount(diskSizeGBElectable float64, nodeCountElectable int, diskSizeGBReadOnly *float64, nodeCountReadOnly int) *admin.ClusterDescription20240805 {
305+
func clusterDescriptionDiskSizeNodeCount(diskSizeGBElectable float64, nodeCountElectable int, diskSizeGBReadOnly *float64, nodeCountReadOnly int, diskIopsState *int) *admin.ClusterDescription20240805 {
284306
return &admin.ClusterDescription20240805{
285307
ReplicationSpecs: &[]admin.ReplicationSpec20240805{
286308
{
@@ -289,10 +311,12 @@ func clusterDescriptionDiskSizeNodeCount(diskSizeGBElectable float64, nodeCountE
289311
ElectableSpecs: &admin.HardwareSpec20240805{
290312
NodeCount: &nodeCountElectable,
291313
DiskSizeGB: &diskSizeGBElectable,
314+
DiskIOPS: diskIopsState,
292315
},
293316
ReadOnlySpecs: &admin.DedicatedHardwareSpec20240805{
294317
NodeCount: &nodeCountReadOnly,
295318
DiskSizeGB: diskSizeGBReadOnly,
319+
DiskIOPS: diskIopsState,
296320
},
297321
},
298322
},

internal/service/advancedcluster/resource_advanced_cluster_migration_test.go

Lines changed: 2 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -27,25 +27,18 @@ func TestMigAdvancedCluster_replicaSetMultiCloud(t *testing.T) {
2727
}
2828

2929
func TestMigAdvancedCluster_singleShardedMultiCloud(t *testing.T) {
30+
acc.SkipIfAdvancedClusterV2Schema(t) // AttributeName("advanced_configuration"): invalid JSON, expected "{", got "["
3031
testCase := singleShardedMultiCloudTestCase(t, false)
3132
mig.CreateAndRunTest(t, &testCase)
3233
}
3334

3435
func TestMigAdvancedCluster_symmetricGeoShardedOldSchema(t *testing.T) {
36+
acc.SkipIfAdvancedClusterV2Schema(t) // AttributeName("advanced_configuration"): invalid JSON, expected "{", got "["
3537
testCase := symmetricGeoShardedOldSchemaTestCase(t, false)
3638
mig.CreateAndRunTest(t, &testCase)
3739
}
3840

3941
func TestMigAdvancedCluster_asymmetricShardedNewSchema(t *testing.T) {
40-
// TODO: Already prepared for TPF but getting this error, note that TestAccClusterAdvancedClusterConfig_asymmetricShardedNewSchema is passing though:
41-
// resource_advanced_cluster_migration_test.go:39: Step 1/2 error: Check failed: Check 2/15 error: mongodbatlas_advanced_cluster.test: Attribute 'replication_specs.0.region_configs.0.electable_specs.disk_iops' not found
42-
// Check 5/15 error: mongodbatlas_advanced_cluster.test: Attribute 'replication_specs.0.region_configs.0.electable_specs.instance_size' not found
43-
// Check 6/15 error: mongodbatlas_advanced_cluster.test: Attribute 'replication_specs.1.region_configs.0.electable_specs.instance_size' not found
44-
// Check 7/15 error: mongodbatlas_advanced_cluster.test: Attribute 'replication_specs.1.region_configs.0.electable_specs.disk_size_gb' not found
45-
// Check 8/15 error: mongodbatlas_advanced_cluster.test: Attribute 'replication_specs.0.region_configs.0.analytics_specs.disk_size_gb' not found
46-
// Check 9/15 error: mongodbatlas_advanced_cluster.test: Attribute 'replication_specs.1.region_configs.0.analytics_specs.disk_size_gb' not found
47-
// Check 10/15 error: mongodbatlas_advanced_cluster.test: Attribute 'replication_specs.0.region_configs.0.electable_specs.disk_size_gb' not found
48-
// Check 11/15 error: mongodbatlas_advanced_cluster.test: Attribute 'replication_specs.1.region_configs.0.electable_specs.disk_iops' not found
4942
acc.SkipIfAdvancedClusterV2Schema(t)
5043
mig.SkipIfVersionBelow(t, "1.23.0") // version where sharded cluster tier auto-scaling was introduced
5144
testCase := asymmetricShardedNewSchemaTestCase(t, false)

0 commit comments

Comments (0)