Skip to content

Commit 0085b85

Browse files
authored
feat: Adds global_cluster_self_managed_sharding to mongodbatlas_advanced_cluster resource (#2348)
* add attribute global_cluster_self_managed_sharding * changelog * tests * example * doc * apply feedback [skip ci] * add clarification [skip ci] * apply feedback
1 parent 36aa748 commit 0085b85

File tree

10 files changed

+176
-40
lines changed

10 files changed

+176
-40
lines changed

.changelog/2348.txt

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,11 @@
1+
```release-note:enhancement
2+
resource/mongodbatlas_advanced_cluster: Adds attribute `global_cluster_self_managed_sharding`
3+
```
4+
5+
```release-note:enhancement
6+
data-source/mongodbatlas_advanced_cluster: Adds attribute `global_cluster_self_managed_sharding`
7+
```
8+
9+
```release-note:enhancement
10+
data-source/mongodbatlas_advanced_clusters: Adds attribute `global_cluster_self_managed_sharding`
11+
```

examples/mongodbatlas_advanced_cluster/global-cluster/main.tf

Lines changed: 7 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -4,9 +4,13 @@ provider "mongodbatlas" {
44
}
55

66
resource "mongodbatlas_advanced_cluster" "cluster" {
7-
project_id = mongodbatlas_project.project.id
8-
name = var.cluster_name
9-
cluster_type = "GEOSHARDED"
7+
project_id = mongodbatlas_project.project.id
8+
name = var.cluster_name
9+
cluster_type = "GEOSHARDED"
10+
11+
# uncomment next line to use self-managed sharding, see doc for more info
12+
# global_cluster_self_managed_sharding = true
13+
1014
backup_enabled = true
1115

1216
replication_specs { # zone n1

internal/service/advancedcluster/data_source_advanced_cluster.go

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -225,6 +225,10 @@ func DataSource() *schema.Resource {
225225
Type: schema.TypeString,
226226
Computed: true,
227227
},
228+
"global_cluster_self_managed_sharding": {
229+
Type: schema.TypeBool,
230+
Computed: true,
231+
},
228232
},
229233
}
230234
}
@@ -320,6 +324,9 @@ func dataSourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.
320324
if err := d.Set("version_release_system", cluster.GetVersionReleaseSystem()); err != nil {
321325
return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "version_release_system", clusterName, err))
322326
}
327+
if err := d.Set("global_cluster_self_managed_sharding", cluster.GetGlobalClusterSelfManagedSharding()); err != nil {
328+
return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "global_cluster_self_managed_sharding", clusterName, err))
329+
}
323330

324331
processArgs, _, err := connV2.ClustersApi.GetClusterAdvancedConfiguration(ctx, projectID, clusterName).Execute()
325332
if err != nil {

internal/service/advancedcluster/data_source_advanced_clusters.go

Lines changed: 25 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -233,6 +233,10 @@ func PluralDataSource() *schema.Resource {
233233
Type: schema.TypeString,
234234
Computed: true,
235235
},
236+
"global_cluster_self_managed_sharding": {
237+
Type: schema.TypeBool,
238+
Computed: true,
239+
},
236240
},
237241
},
238242
},
@@ -273,26 +277,27 @@ func flattenAdvancedClusters(ctx context.Context, connV2 *admin.APIClient, clust
273277
}
274278

275279
result := map[string]any{
276-
"advanced_configuration": flattenProcessArgs(processArgs),
277-
"backup_enabled": cluster.GetBackupEnabled(),
278-
"bi_connector_config": flattenBiConnectorConfig(cluster.GetBiConnector()),
279-
"cluster_type": cluster.GetClusterType(),
280-
"create_date": conversion.TimePtrToStringPtr(cluster.CreateDate),
281-
"connection_strings": flattenConnectionStrings(cluster.GetConnectionStrings()),
282-
"disk_size_gb": cluster.GetDiskSizeGB(),
283-
"encryption_at_rest_provider": cluster.GetEncryptionAtRestProvider(),
284-
"labels": flattenLabels(cluster.GetLabels()),
285-
"tags": conversion.FlattenTags(cluster.GetTags()),
286-
"mongo_db_major_version": cluster.GetMongoDBMajorVersion(),
287-
"mongo_db_version": cluster.GetMongoDBVersion(),
288-
"name": cluster.GetName(),
289-
"paused": cluster.GetPaused(),
290-
"pit_enabled": cluster.GetPitEnabled(),
291-
"replication_specs": replicationSpecs,
292-
"root_cert_type": cluster.GetRootCertType(),
293-
"state_name": cluster.GetStateName(),
294-
"termination_protection_enabled": cluster.GetTerminationProtectionEnabled(),
295-
"version_release_system": cluster.GetVersionReleaseSystem(),
280+
"advanced_configuration": flattenProcessArgs(processArgs),
281+
"backup_enabled": cluster.GetBackupEnabled(),
282+
"bi_connector_config": flattenBiConnectorConfig(cluster.GetBiConnector()),
283+
"cluster_type": cluster.GetClusterType(),
284+
"create_date": conversion.TimePtrToStringPtr(cluster.CreateDate),
285+
"connection_strings": flattenConnectionStrings(cluster.GetConnectionStrings()),
286+
"disk_size_gb": cluster.GetDiskSizeGB(),
287+
"encryption_at_rest_provider": cluster.GetEncryptionAtRestProvider(),
288+
"labels": flattenLabels(cluster.GetLabels()),
289+
"tags": conversion.FlattenTags(cluster.GetTags()),
290+
"mongo_db_major_version": cluster.GetMongoDBMajorVersion(),
291+
"mongo_db_version": cluster.GetMongoDBVersion(),
292+
"name": cluster.GetName(),
293+
"paused": cluster.GetPaused(),
294+
"pit_enabled": cluster.GetPitEnabled(),
295+
"replication_specs": replicationSpecs,
296+
"root_cert_type": cluster.GetRootCertType(),
297+
"state_name": cluster.GetStateName(),
298+
"termination_protection_enabled": cluster.GetTerminationProtectionEnabled(),
299+
"version_release_system": cluster.GetVersionReleaseSystem(),
300+
"global_cluster_self_managed_sharding": cluster.GetGlobalClusterSelfManagedSharding(),
296301
}
297302
results = append(results, result)
298303
}

internal/service/advancedcluster/resource_advanced_cluster.go

Lines changed: 16 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -317,6 +317,11 @@ func Resource() *schema.Resource {
317317
Optional: true,
318318
Description: "Submit this field alongside your topology reconfiguration to request a new regional outage resistant topology",
319319
},
320+
"global_cluster_self_managed_sharding": {
321+
Type: schema.TypeBool,
322+
Optional: true,
323+
Computed: true,
324+
},
320325
},
321326
Timeouts: &schema.ResourceTimeout{
322327
Create: schema.DefaultTimeout(3 * time.Hour),
@@ -409,6 +414,9 @@ func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag.
409414
if v, ok := d.GetOk("version_release_system"); ok {
410415
params.VersionReleaseSystem = conversion.StringPtr(v.(string))
411416
}
417+
if v, ok := d.GetOk("global_cluster_self_managed_sharding"); ok {
418+
params.GlobalClusterSelfManagedSharding = conversion.Pointer(v.(bool))
419+
}
412420

413421
// Validate oplog_size_mb to show the error before the cluster is created.
414422
if oplogSizeMB, ok := d.GetOkExists("advanced_configuration.0.oplog_size_mb"); ok {
@@ -573,6 +581,10 @@ func resourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Di
573581
return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "accept_data_risks_and_force_replica_set_reconfig", clusterName, err))
574582
}
575583

584+
if err := d.Set("global_cluster_self_managed_sharding", cluster.GetGlobalClusterSelfManagedSharding()); err != nil {
585+
return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "global_cluster_self_managed_sharding", clusterName, err))
586+
}
587+
576588
processArgs, _, err := connV2.ClustersApi.GetClusterAdvancedConfiguration(ctx, projectID, clusterName).Execute()
577589
if err != nil {
578590
return diag.FromErr(fmt.Errorf(errorConfigRead, clusterName, err))
@@ -685,6 +697,10 @@ func resourceUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag.
685697
cluster.VersionReleaseSystem = conversion.StringPtr(d.Get("version_release_system").(string))
686698
}
687699

700+
if d.HasChange("global_cluster_self_managed_sharding") {
701+
cluster.GlobalClusterSelfManagedSharding = conversion.Pointer(d.Get("global_cluster_self_managed_sharding").(bool))
702+
}
703+
688704
if d.HasChange("accept_data_risks_and_force_replica_set_reconfig") {
689705
if strTime := d.Get("accept_data_risks_and_force_replica_set_reconfig").(string); strTime != "" {
690706
t, ok := conversion.StringToTime(strTime)

internal/service/advancedcluster/resource_advanced_cluster_test.go

Lines changed: 104 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -40,13 +40,16 @@ func TestAccClusterAdvancedCluster_basicTenant(t *testing.T) {
4040
resource.TestCheckResourceAttr(resourceName, "name", clusterName),
4141
resource.TestCheckResourceAttrSet(resourceName, "replication_specs.#"),
4242
resource.TestCheckResourceAttrSet(resourceName, "replication_specs.0.region_configs.#"),
43-
resource.TestCheckResourceAttrSet(resourceName, "termination_protection_enabled"),
43+
resource.TestCheckResourceAttr(resourceName, "termination_protection_enabled", "false"),
44+
resource.TestCheckResourceAttr(resourceName, "global_cluster_self_managed_sharding", "false"),
4445
resource.TestCheckResourceAttr(dataSourceName, "name", clusterName),
4546
resource.TestCheckResourceAttr(dataSourceName, "termination_protection_enabled", "false"),
47+
resource.TestCheckResourceAttr(dataSourceName, "global_cluster_self_managed_sharding", "false"),
4648
resource.TestCheckResourceAttrSet(dataSourcePluralName, "results.#"),
4749
resource.TestCheckResourceAttrSet(dataSourcePluralName, "results.0.replication_specs.#"),
4850
resource.TestCheckResourceAttrSet(dataSourcePluralName, "results.0.name"),
4951
resource.TestCheckResourceAttrSet(dataSourcePluralName, "results.0.termination_protection_enabled"),
52+
resource.TestCheckResourceAttrSet(dataSourcePluralName, "results.0.global_cluster_self_managed_sharding"),
5053
),
5154
},
5255
{
@@ -58,12 +61,16 @@ func TestAccClusterAdvancedCluster_basicTenant(t *testing.T) {
5861
resource.TestCheckResourceAttrSet(resourceName, "replication_specs.#"),
5962
resource.TestCheckResourceAttrSet(resourceName, "replication_specs.0.region_configs.#"),
6063
resource.TestCheckResourceAttr(resourceName, "labels.#", "0"),
64+
resource.TestCheckResourceAttr(resourceName, "termination_protection_enabled", "false"),
65+
resource.TestCheckResourceAttr(resourceName, "global_cluster_self_managed_sharding", "false"),
6166
resource.TestCheckResourceAttr(dataSourceName, "name", clusterNameUpdated),
6267
resource.TestCheckResourceAttr(dataSourceName, "termination_protection_enabled", "false"),
68+
resource.TestCheckResourceAttr(dataSourceName, "global_cluster_self_managed_sharding", "false"),
6369
resource.TestCheckResourceAttrSet(dataSourcePluralName, "results.#"),
6470
resource.TestCheckResourceAttrSet(dataSourcePluralName, "results.0.replication_specs.#"),
6571
resource.TestCheckResourceAttrSet(dataSourcePluralName, "results.0.name"),
6672
resource.TestCheckResourceAttrSet(dataSourcePluralName, "results.0.termination_protection_enabled"),
73+
resource.TestCheckResourceAttrSet(dataSourcePluralName, "results.0.global_cluster_self_managed_sharding"),
6774
),
6875
},
6976
{
@@ -547,11 +554,9 @@ func TestAccClusterAdvancedClusterConfig_replicationSpecsAnalyticsAutoScaling(t
547554

548555
func TestAccClusterAdvancedClusterConfig_replicationSpecsAndShardUpdating(t *testing.T) {
549556
var (
550-
orgID = os.Getenv("MONGODB_ATLAS_ORG_ID")
551-
projectName = acc.RandomProjectName()
552-
clusterName = acc.RandomClusterName() // No ProjectIDExecution to avoid cross-region limits because multi-region
553-
numShards = "1"
554-
numShardsUpdated = "2"
557+
orgID = os.Getenv("MONGODB_ATLAS_ORG_ID")
558+
projectName = acc.RandomProjectName() // No ProjectIDExecution to avoid cross-region limits because multi-region
559+
clusterName = acc.RandomClusterName()
555560
)
556561

557562
resource.ParallelTest(t, resource.TestCase{
@@ -560,21 +565,27 @@ func TestAccClusterAdvancedClusterConfig_replicationSpecsAndShardUpdating(t *tes
560565
CheckDestroy: acc.CheckDestroyCluster,
561566
Steps: []resource.TestStep{
562567
{
563-
Config: configMultiZoneWithShards(orgID, projectName, clusterName, numShards, numShards),
568+
Config: configMultiZoneWithShards(orgID, projectName, clusterName, 1, 1, false),
564569
Check: resource.ComposeTestCheckFunc(
565570
checkExists(resourceName),
566571
resource.TestCheckResourceAttr(resourceName, "name", clusterName),
567572
resource.TestCheckResourceAttr(resourceName, "replication_specs.0.num_shards", "1"),
568573
resource.TestCheckResourceAttr(resourceName, "replication_specs.1.num_shards", "1"),
574+
resource.TestCheckResourceAttr(dataSourceName, "name", clusterName),
575+
resource.TestCheckResourceAttr(dataSourceName, "replication_specs.0.num_shards", "1"),
576+
resource.TestCheckResourceAttr(dataSourceName, "replication_specs.1.num_shards", "1"),
569577
),
570578
},
571579
{
572-
Config: configMultiZoneWithShards(orgID, projectName, clusterName, numShardsUpdated, numShards),
580+
Config: configMultiZoneWithShards(orgID, projectName, clusterName, 2, 1, false),
573581
Check: resource.ComposeTestCheckFunc(
574582
checkExists(resourceName),
575583
resource.TestCheckResourceAttr(resourceName, "name", clusterName),
576584
resource.TestCheckResourceAttr(resourceName, "replication_specs.0.num_shards", "2"),
577585
resource.TestCheckResourceAttr(resourceName, "replication_specs.1.num_shards", "1"),
586+
resource.TestCheckResourceAttr(dataSourceName, "name", clusterName),
587+
resource.TestCheckResourceAttr(dataSourceName, "replication_specs.0.num_shards", "2"),
588+
resource.TestCheckResourceAttr(dataSourceName, "replication_specs.1.num_shards", "1"),
578589
),
579590
},
580591
},
@@ -656,6 +667,53 @@ func TestAccClusterAdvancedCluster_withTags(t *testing.T) {
656667
})
657668
}
658669

670+
func TestAccClusterAdvancedClusterConfig_selfManagedSharding(t *testing.T) {
671+
var (
672+
orgID = os.Getenv("MONGODB_ATLAS_ORG_ID")
673+
projectName = acc.RandomProjectName() // No ProjectIDExecution to avoid cross-region limits because multi-region
674+
clusterName = acc.RandomClusterName()
675+
)
676+
677+
resource.ParallelTest(t, resource.TestCase{
678+
PreCheck: func() { acc.PreCheckBasic(t) },
679+
ProtoV6ProviderFactories: acc.TestAccProviderV6Factories,
680+
CheckDestroy: acc.CheckDestroyCluster,
681+
Steps: []resource.TestStep{
682+
{
683+
Config: configMultiZoneWithShards(orgID, projectName, clusterName, 1, 1, true),
684+
Check: resource.ComposeTestCheckFunc(
685+
checkExists(resourceName),
686+
resource.TestCheckResourceAttr(resourceName, "global_cluster_self_managed_sharding", "true"),
687+
resource.TestCheckResourceAttr(dataSourceName, "global_cluster_self_managed_sharding", "true"),
688+
),
689+
},
690+
{
691+
Config: configMultiZoneWithShards(orgID, projectName, clusterName, 1, 1, false),
692+
ExpectError: regexp.MustCompile("CANNOT_MODIFY_GLOBAL_CLUSTER_MANAGEMENT_SETTING"),
693+
},
694+
},
695+
})
696+
}
697+
698+
func TestAccClusterAdvancedClusterConfig_selfManagedShardingIncorrectType(t *testing.T) {
699+
var (
700+
projectID = acc.ProjectIDExecution(t)
701+
clusterName = acc.RandomClusterName()
702+
)
703+
704+
resource.ParallelTest(t, resource.TestCase{
705+
PreCheck: func() { acc.PreCheckBasic(t) },
706+
ProtoV6ProviderFactories: acc.TestAccProviderV6Factories,
707+
CheckDestroy: acc.CheckDestroyCluster,
708+
Steps: []resource.TestStep{
709+
{
710+
Config: configIncorrectTypeGobalClusterSelfManagedSharding(projectID, clusterName),
711+
ExpectError: regexp.MustCompile("CANNOT_SET_SELF_MANAGED_SHARDING_FOR_NON_GLOBAL_CLUSTER"),
712+
},
713+
},
714+
})
715+
}
716+
659717
func checkExists(resourceName string) resource.TestCheckFunc {
660718
return func(s *terraform.State) error {
661719
rs, ok := s.RootModule().Resources[resourceName]
@@ -788,6 +846,34 @@ func configSingleProvider(projectID, name string) string {
788846
`, projectID, name)
789847
}
790848

849+
func configIncorrectTypeGobalClusterSelfManagedSharding(projectID, name string) string {
850+
return fmt.Sprintf(`
851+
resource "mongodbatlas_advanced_cluster" "test" {
852+
project_id = %[1]q
853+
name = %[2]q
854+
855+
cluster_type = "REPLICASET"
856+
global_cluster_self_managed_sharding = true # invalid, can only be used with GEOSHARDED clusters
857+
858+
replication_specs {
859+
region_configs {
860+
electable_specs {
861+
instance_size = "M10"
862+
node_count = 3
863+
}
864+
analytics_specs {
865+
instance_size = "M10"
866+
node_count = 1
867+
}
868+
provider_name = "AWS"
869+
priority = 7
870+
region_name = "US_WEST_2"
871+
}
872+
}
873+
}
874+
`, projectID, name)
875+
}
876+
791877
func configMultiCloud(orgID, projectName, name string) string {
792878
return fmt.Sprintf(`
793879
resource "mongodbatlas_project" "cluster_project" {
@@ -1063,7 +1149,7 @@ func configReplicationSpecsAnalyticsAutoScaling(projectID, clusterName string, p
10631149
`, projectID, clusterName, p.Compute.GetEnabled(), p.DiskGB.GetEnabled(), p.Compute.GetMaxInstanceSize())
10641150
}
10651151

1066-
func configMultiZoneWithShards(orgID, projectName, name, numShardsFirstZone, numShardsSecondZone string) string {
1152+
func configMultiZoneWithShards(orgID, projectName, name string, numShardsFirstZone, numShardsSecondZone int, selfManagedSharding bool) string {
10671153
return fmt.Sprintf(`
10681154
resource "mongodbatlas_project" "cluster_project" {
10691155
org_id = %[1]q
@@ -1076,10 +1162,11 @@ func configMultiZoneWithShards(orgID, projectName, name, numShardsFirstZone, num
10761162
backup_enabled = false
10771163
mongo_db_major_version = "7.0"
10781164
cluster_type = "GEOSHARDED"
1165+
global_cluster_self_managed_sharding = %[6]t
10791166
10801167
replication_specs {
10811168
zone_name = "zone n1"
1082-
num_shards = %[4]q
1169+
num_shards = %[4]d
10831170
10841171
region_configs {
10851172
electable_specs {
@@ -1098,7 +1185,7 @@ func configMultiZoneWithShards(orgID, projectName, name, numShardsFirstZone, num
10981185
10991186
replication_specs {
11001187
zone_name = "zone n2"
1101-
num_shards = %[5]q
1188+
num_shards = %[5]d
11021189
11031190
region_configs {
11041191
electable_specs {
@@ -1115,5 +1202,10 @@ func configMultiZoneWithShards(orgID, projectName, name, numShardsFirstZone, num
11151202
}
11161203
}
11171204
}
1118-
`, orgID, projectName, name, numShardsFirstZone, numShardsSecondZone)
1205+
1206+
data "mongodbatlas_advanced_cluster" "test" {
1207+
project_id = mongodbatlas_advanced_cluster.test.project_id
1208+
name = mongodbatlas_advanced_cluster.test.name
1209+
}
1210+
`, orgID, projectName, name, numShardsFirstZone, numShardsSecondZone, selfManagedSharding)
11191211
}

internal/service/cluster/resource_cluster_test.go

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1127,8 +1127,8 @@ func TestAccCluster_basicGCPRegionNameWesternUS(t *testing.T) {
11271127
func TestAccCluster_basicGCPRegionNameUSWest2(t *testing.T) {
11281128
var (
11291129
orgID = os.Getenv("MONGODB_ATLAS_ORG_ID")
1130-
projectName = acc.RandomProjectName()
1131-
clusterName = acc.RandomClusterName() // No ProjectIDExecution to avoid cross-region limits because no AWS
1130+
projectName = acc.RandomProjectName() // No ProjectIDExecution to avoid cross-region limits because no AWS
1131+
clusterName = acc.RandomClusterName()
11321132
regionName = "US_WEST_2"
11331133
)
11341134

website/docs/d/advanced_cluster.html.markdown

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -66,7 +66,7 @@ In addition to all arguments above, the following attributes are exported:
6666
* `termination_protection_enabled` - Flag that indicates whether termination protection is enabled on the cluster. If set to true, MongoDB Cloud won't delete the cluster. If set to false, MongoDB Cloud will delete the cluster.
6767
* `version_release_system` - Release cadence that Atlas uses for this cluster.
6868
* `advanced_configuration` - Get the advanced configuration options. See [Advanced Configuration](#advanced-configuration) below for more details.
69-
69+
* `global_cluster_self_managed_sharding` - Flag that indicates if cluster uses Atlas-Managed Sharding (false) or Self-Managed Sharding (true).
7070

7171
### bi_connector_config
7272

website/docs/d/advanced_clusters.html.markdown

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -68,7 +68,7 @@ In addition to all arguments above, the following attributes are exported:
6868
* `termination_protection_enabled` - Flag that indicates whether termination protection is enabled on the cluster. If set to true, MongoDB Cloud won't delete the cluster. If set to false, MongoDB Cloud will delete the cluster.
6969
* `version_release_system` - Release cadence that Atlas uses for this cluster.
7070
* `advanced_configuration` - Get the advanced configuration options. See [Advanced Configuration](#advanced-configuration) below for more details.
71-
71+
* `global_cluster_self_managed_sharding` - Flag that indicates if cluster uses Atlas-Managed Sharding (false) or Self-Managed Sharding (true).
7272

7373
### bi_connector_config
7474

0 commit comments

Comments
 (0)