
Commit a5132e8

INTMDB-293: Added container_id in advanced cluster (#663)
* feat: added parameter container ids for advanced cluster
* fixes linter
* refactor: changed the name region_configs_containers_ids to container_id
* refactor suggested by tony
* changed emails for teams
* fixes test
1 parent 923243f commit a5132e8

8 files changed (+125, -43 lines)

mongodbatlas/data_source_mongodbatlas_advanced_cluster.go

Lines changed: 13 additions & 1 deletion
@@ -162,6 +162,13 @@ func dataSourceMongoDBAtlasAdvancedCluster() *schema.Resource {
 						},
 					},
 				},
+				"container_id": {
+					Type: schema.TypeMap,
+					Elem: &schema.Schema{
+						Type: schema.TypeString,
+					},
+					Computed: true,
+				},
 				"zone_name": {
 					Type: schema.TypeString,
 					Computed: true,
@@ -253,7 +260,12 @@ func dataSourceMongoDBAtlasAdvancedClusterRead(ctx context.Context, d *schema.Re
 		return diag.FromErr(fmt.Errorf(errorClusterAdvancedSetting, "pit_enabled", clusterName, err))
 	}
 
-	if err := d.Set("replication_specs", flattenAdvancedReplicationSpecs(cluster.ReplicationSpecs, nil)); err != nil {
+	replicationSpecs, err := flattenAdvancedReplicationSpecs(ctx, cluster.ReplicationSpecs, d.Get("replication_specs").(*schema.Set).List(), d, conn)
+	if err != nil {
+		return diag.FromErr(fmt.Errorf(errorClusterAdvancedSetting, "replication_specs", clusterName, err))
+	}
+
+	if err := d.Set("replication_specs", replicationSpecs); err != nil {
 		return diag.FromErr(fmt.Errorf(errorClusterAdvancedSetting, "replication_specs", clusterName, err))
 	}
 
mongodbatlas/data_source_mongodbatlas_advanced_clusters.go

Lines changed: 14 additions & 3 deletions
@@ -170,6 +170,13 @@ func dataSourceMongoDBAtlasAdvancedClusters() *schema.Resource {
 						},
 					},
 				},
+				"container_id": {
+					Type: schema.TypeMap,
+					Elem: &schema.Schema{
+						Type: schema.TypeString,
+					},
+					Computed: true,
+				},
 				"zone_name": {
 					Type: schema.TypeString,
 					Computed: true,
@@ -212,21 +219,25 @@ func dataSourceMongoDBAtlasAdvancedClustersRead(ctx context.Context, d *schema.R
 		return diag.FromErr(fmt.Errorf("error reading advanced cluster list for project(%s): %s", projectID, err))
 	}
 
-	if err := d.Set("results", flattenAdvancedClusters(ctx, conn, clusters.Results)); err != nil {
+	if err := d.Set("results", flattenAdvancedClusters(ctx, conn, clusters.Results, d)); err != nil {
 		return diag.FromErr(fmt.Errorf(errorClusterAdvancedSetting, "results", d.Id(), err))
 	}
 
 	return nil
 }
 
-func flattenAdvancedClusters(ctx context.Context, conn *matlas.Client, clusters []*matlas.AdvancedCluster) []map[string]interface{} {
+func flattenAdvancedClusters(ctx context.Context, conn *matlas.Client, clusters []*matlas.AdvancedCluster, d *schema.ResourceData) []map[string]interface{} {
 	results := make([]map[string]interface{}, 0)
 
 	for i := range clusters {
 		processArgs, _, err := conn.Clusters.GetProcessArgs(ctx, clusters[i].GroupID, clusters[i].Name)
 		if err != nil {
 			log.Printf("[WARN] Error setting `advanced_configuration` for the cluster(%s): %s", clusters[i].ID, err)
 		}
+		replicationSpecs, err := flattenAdvancedReplicationSpecs(ctx, clusters[i].ReplicationSpecs, nil, d, conn)
+		if err != nil {
+			log.Printf("[WARN] Error setting `replication_specs` for the cluster(%s): %s", clusters[i].ID, err)
+		}
 
 		result := map[string]interface{}{
 			"advanced_configuration": flattenProcessArgs(processArgs),
@@ -243,7 +254,7 @@ func flattenAdvancedClusters(ctx context.Context, conn *matlas.Client, clusters
 			"name": clusters[i].Name,
 			"paused": clusters[i].Paused,
 			"pit_enabled": clusters[i].PitEnabled,
-			"replication_specs": flattenAdvancedReplicationSpecs(clusters[i].ReplicationSpecs, nil),
+			"replication_specs": replicationSpecs,
 			"root_cert_type": clusters[i].RootCertType,
 			"state_name": clusters[i].StateName,
 			"version_release_system": clusters[i].VersionReleaseSystem,

mongodbatlas/data_source_mongodbatlas_clusters_test.go

Lines changed: 0 additions & 3 deletions
@@ -42,9 +42,6 @@ func TestAccDataSourceMongoDBAtlasClusters_basic(t *testing.T) {
 					resource.TestCheckResourceAttrSet(dataSourceName, "results.#"),
 					resource.TestCheckResourceAttrSet(dataSourceName, "results.0.replication_specs.#"),
 					resource.TestCheckResourceAttrSet(dataSourceName, "results.0.name"),
-					resource.TestCheckResourceAttr(dataSourceName, "results.0.labels.#", "1"),
-					resource.TestCheckResourceAttr(dataSourceName, "results.0.auto_scaling_compute_enabled", "false"),
-					resource.TestCheckResourceAttr(dataSourceName, "results.0.auto_scaling_compute_scale_down_enabled", "false"),
 					resource.TestCheckResourceAttr(dataSourceName, "results.0.version_release_system", "LTS"),
 				),
 			},

mongodbatlas/resource_mongodbatlas_advanced_cluster.go

Lines changed: 90 additions & 33 deletions
@@ -222,6 +222,13 @@ func resourceMongoDBAtlasAdvancedCluster() *schema.Resource {
 						},
 					},
 				},
+				"container_id": {
+					Type: schema.TypeMap,
+					Elem: &schema.Schema{
+						Type: schema.TypeString,
+					},
+					Computed: true,
+				},
 				"zone_name": {
 					Type: schema.TypeString,
 					Optional: true,
@@ -468,7 +475,12 @@ func resourceMongoDBAtlasAdvancedClusterRead(ctx context.Context, d *schema.Reso
 		return diag.FromErr(fmt.Errorf(errorClusterAdvancedSetting, "pit_enabled", clusterName, err))
 	}
 
-	if err := d.Set("replication_specs", flattenAdvancedReplicationSpecs(cluster.ReplicationSpecs, d.Get("replication_specs").(*schema.Set).List())); err != nil {
+	replicationSpecs, err := flattenAdvancedReplicationSpecs(ctx, cluster.ReplicationSpecs, d.Get("replication_specs").(*schema.Set).List(), d, conn)
+	if err != nil {
+		return diag.FromErr(fmt.Errorf(errorClusterAdvancedSetting, "replication_specs", clusterName, err))
+	}
+
+	if err := d.Set("replication_specs", replicationSpecs); err != nil {
 		return diag.FromErr(fmt.Errorf(errorClusterAdvancedSetting, "replication_specs", clusterName, err))
 	}
 
@@ -833,27 +845,39 @@ func expandRegionConfigAutoScaling(tfList []interface{}) *matlas.AdvancedAutoSca
 	return apiObject
 }
 
-func flattenAdvancedReplicationSpec(apiObject *matlas.AdvancedReplicationSpec, tfMapObject map[string]interface{}) map[string]interface{} {
+func flattenAdvancedReplicationSpec(ctx context.Context, apiObject *matlas.AdvancedReplicationSpec, tfMapObject map[string]interface{},
+	d *schema.ResourceData, conn *matlas.Client) (map[string]interface{}, error) {
 	if apiObject == nil {
-		return nil
+		return nil, nil
 	}
 
 	tfMap := map[string]interface{}{}
 	tfMap["num_shards"] = apiObject.NumShards
 	tfMap["id"] = apiObject.ID
 	if tfMapObject != nil {
-		tfMap["region_configs"] = flattenAdvancedReplicationSpecRegionConfigs(apiObject.RegionConfigs, tfMapObject["region_configs"].(*schema.Set).List())
+		object, containerIds, err := flattenAdvancedReplicationSpecRegionConfigs(ctx, apiObject.RegionConfigs, tfMapObject["region_configs"].(*schema.Set).List(), d, conn)
+		if err != nil {
+			return nil, err
+		}
+		tfMap["region_configs"] = object
+		tfMap["container_id"] = containerIds
 	} else {
-		tfMap["region_configs"] = flattenAdvancedReplicationSpecRegionConfigs(apiObject.RegionConfigs, nil)
+		object, containerIds, err := flattenAdvancedReplicationSpecRegionConfigs(ctx, apiObject.RegionConfigs, nil, d, conn)
+		if err != nil {
+			return nil, err
+		}
+		tfMap["region_configs"] = object
+		tfMap["container_id"] = containerIds
 	}
 	tfMap["zone_name"] = apiObject.ZoneName
 
-	return tfMap
+	return tfMap, nil
 }
 
-func flattenAdvancedReplicationSpecs(apiObjects []*matlas.AdvancedReplicationSpec, tfMapObjects []interface{}) []map[string]interface{} {
+func flattenAdvancedReplicationSpecs(ctx context.Context, apiObjects []*matlas.AdvancedReplicationSpec, tfMapObjects []interface{},
+	d *schema.ResourceData, conn *matlas.Client) ([]map[string]interface{}, error) {
 	if len(apiObjects) == 0 {
-		return nil
+		return nil, nil
 	}
 
 	var tfList []map[string]interface{}
@@ -863,15 +887,20 @@ func flattenAdvancedReplicationSpecs(apiObjects []*matlas.AdvancedReplicationSpe
 			continue
 		}
 
+		var tfMapObject map[string]interface{}
+
 		if len(tfMapObjects) > 0 {
-			tfMapObject := tfMapObjects[i].(map[string]interface{})
-			tfList = append(tfList, flattenAdvancedReplicationSpec(apiObject, tfMapObject))
-		} else {
-			tfList = append(tfList, flattenAdvancedReplicationSpec(apiObject, nil))
+			tfMapObject = tfMapObjects[i].(map[string]interface{})
 		}
+
+		advancedReplicationSpec, err := flattenAdvancedReplicationSpec(ctx, apiObject, tfMapObject, d, conn)
+		if err != nil {
+			return nil, err
+		}
+		tfList = append(tfList, advancedReplicationSpec)
 	}
 
-	return tfList
+	return tfList, nil
 }
 
 func flattenAdvancedReplicationSpecRegionConfig(apiObject *matlas.AdvancedRegionConfig, tfMapObject map[string]interface{}) map[string]interface{} {
@@ -908,12 +937,14 @@ func flattenAdvancedReplicationSpecRegionConfig(apiObject *matlas.AdvancedRegion
 	return tfMap
 }
 
-func flattenAdvancedReplicationSpecRegionConfigs(apiObjects []*matlas.AdvancedRegionConfig, tfMapObjects []interface{}) []map[string]interface{} {
+func flattenAdvancedReplicationSpecRegionConfigs(ctx context.Context, apiObjects []*matlas.AdvancedRegionConfig, tfMapObjects []interface{},
+	d *schema.ResourceData, conn *matlas.Client) (tfResult []map[string]interface{}, containersIDs map[string]string, err error) {
 	if len(apiObjects) == 0 {
-		return nil
+		return nil, nil, nil
 	}
 
 	var tfList []map[string]interface{}
+	containerIds := make(map[string]string)
 
 	for i, apiObject := range apiObjects {
 		if apiObject == nil {
@@ -926,9 +957,21 @@ func flattenAdvancedReplicationSpecRegionConfigs(apiObjects []*matlas.AdvancedRe
 		} else {
 			tfList = append(tfList, flattenAdvancedReplicationSpecRegionConfig(apiObject, nil))
 		}
+
+		if apiObject.ProviderName != "TENANT" {
+			containers, _, err := conn.Containers.List(ctx, d.Get("project_id").(string),
+				&matlas.ContainersListOptions{ProviderName: apiObject.ProviderName})
+			if err != nil {
+				return nil, nil, err
+			}
+			if result := getAdvancedClusterContainerID(containers, apiObject); result != "" {
+				// Will print as "providerName:regionName" = "containerId" in terraform show
+				containerIds[fmt.Sprintf("%s:%s", apiObject.ProviderName, apiObject.RegionName)] = result
+			}
+		}
 	}
 
-	return tfList
+	return tfList, containerIds, nil
 }
 
 func flattenAdvancedReplicationSpecRegionConfigSpec(apiObject *matlas.Specs, providerName string, tfMapObjects []interface{}) []map[string]interface{} {
@@ -940,29 +983,25 @@ func flattenAdvancedReplicationSpecRegionConfigSpec(apiObject *matlas.Specs, pro
 	tfMap := map[string]interface{}{}
 
 	if len(tfMapObjects) > 0 {
-		checkObject := false
-		checkObject = true
 		tfMapObject := tfMapObjects[0].(map[string]interface{})
 
 		if providerName == "AWS" {
-			if checkObject {
-				if cast.ToInt64(apiObject.DiskIOPS) > 0 {
-					if v, ok := tfMapObject["disk_iops"]; ok && v.(int) > 0 {
-						tfMap["disk_iops"] = apiObject.DiskIOPS
-					}
-				}
-				if v, ok := tfMapObject["ebs_volume_type"]; ok && v.(string) != "" {
-					tfMap["ebs_volume_type"] = apiObject.EbsVolumeType
+			if cast.ToInt64(apiObject.DiskIOPS) > 0 {
+				if v, ok := tfMapObject["disk_iops"]; ok && v.(int) > 0 {
+					tfMap["disk_iops"] = apiObject.DiskIOPS
 				}
 			}
-		}
-		tfMap["node_count"] = apiObject.NodeCount
-		if checkObject {
-			if v, ok := tfMapObject["instance_size"]; ok && v.(string) != "" {
-				tfMap["instance_size"] = apiObject.InstanceSize
-				tfList = append(tfList, tfMap)
+			if v, ok := tfMapObject["ebs_volume_type"]; ok && v.(string) != "" {
+				tfMap["ebs_volume_type"] = apiObject.EbsVolumeType
 			}
 		}
+		if _, ok := tfMapObject["node_count"]; ok {
+			tfMap["node_count"] = apiObject.NodeCount
+		}
+		if v, ok := tfMapObject["instance_size"]; ok && v.(string) != "" {
+			tfMap["instance_size"] = apiObject.InstanceSize
+			tfList = append(tfList, tfMap)
+		}
 	} else {
 		tfMap["disk_iops"] = apiObject.DiskIOPS
 		tfMap["ebs_volume_type"] = apiObject.EbsVolumeType
@@ -1031,7 +1070,7 @@ func replicationSpecsHashSet(v interface{}) int {
 	buf.WriteString(fmt.Sprintf("%d", m["num_shards"].(int)))
 	buf.WriteString(fmt.Sprintf("%+v", m["region_configs"].(*schema.Set)))
 	buf.WriteString(m["zone_name"].(string))
-	return HashCodeString(buf.String())
+	return schema.HashString(buf.String())
 }
 
 func updateAdvancedCluster(ctx context.Context, conn *matlas.Client, request *matlas.AdvancedCluster, projectID, name string) (*matlas.AdvancedCluster, *matlas.Response, error) {
@@ -1057,3 +1096,21 @@ func updateAdvancedCluster(ctx context.Context, conn *matlas.Client, request *ma
 
 	return cluster, resp, nil
 }
+
+func getAdvancedClusterContainerID(containers []matlas.Container, cluster *matlas.AdvancedRegionConfig) string {
+	if len(containers) != 0 {
+		for i := range containers {
+			if cluster.ProviderName == "GCP" {
+				return containers[i].ID
+			}
+
+			if containers[i].ProviderName == cluster.ProviderName &&
+				containers[i].Region == cluster.RegionName || // For Azure
+				containers[i].RegionName == cluster.RegionName { // For AWS
+				return containers[i].ID
+			}
+		}
+	}
+
+	return ""
+}

mongodbatlas/resource_mongodbatlas_team_test.go

Lines changed: 3 additions & 3 deletions
@@ -33,7 +33,7 @@ func TestAccResourceMongoDBAtlasTeam_basic(t *testing.T) {
 					[]string{


-						"marin.salinas@digitalonus.com",
+						"edgar.lopez@digitalonus.com",
 					},
 				),
 				Check: resource.ComposeTestCheckFunc(
@@ -47,7 +47,7 @@ func TestAccResourceMongoDBAtlasTeam_basic(t *testing.T) {
 			{
 				Config: testAccMongoDBAtlasTeamConfig(orgID, updatedName,
 					[]string{
-						"marin.salinas@digitalonus.com",
+						"edgar.lopez@digitalonus.com",

 					},
 				),
@@ -62,7 +62,7 @@ func TestAccResourceMongoDBAtlasTeam_basic(t *testing.T) {
 			{
 				Config: testAccMongoDBAtlasTeamConfig(orgID, updatedName,
 					[]string{
-						"marin.salinas@digitalonus.com",
+						"edgar.lopez@digitalonus.com",


 					},

website/docs/d/advanced_cluster.html.markdown

Lines changed: 1 addition & 0 deletions
@@ -85,6 +85,7 @@ Key-value pairs that tag and categorize the cluster. Each key and value has a ma
 
 * `num_shards` - Provide this value if you set a `cluster_type` of SHARDED or GEOSHARDED.
 * `region_configs` - Configuration for the hardware specifications for nodes set for a given region. Each `region_configs` object describes the region's priority in elections and the number and type of MongoDB nodes that Atlas deploys to the region. Each `region_configs` object must have either an `analytics_specs` object, `electable_specs` object, or `read_only_specs` object. See [below](#region_configs).
+* `container_id` - A key-value map of the Network Peering Container ID(s) for the configuration specified in `region_configs`. The Container ID is the ID of the container either created programmatically by the user before any clusters existed in a project or when the first cluster in the region (AWS/Azure) or project (GCP) was created. The syntax is `"providerName:regionName" = "containerId"`, for example `"AWS:US_EAST_1" = "61e0797dde08fb498ca11a71"`.
 * `zone_name` - Name for the zone in a Global Cluster.

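For orientation, here is a minimal sketch of reading the new `container_id` map from this data source; the data source label, `var.project_id`, and the cluster name are illustrative assumptions, not part of this commit.

```terraform
# Hypothetical example: surface the per-region container IDs exposed by the
# mongodbatlas_advanced_cluster data source after this change.
data "mongodbatlas_advanced_cluster" "example" {
  project_id = var.project_id    # assumed variable
  name       = "cluster-example" # assumed cluster name
}

output "cluster_container_ids" {
  # One "providerName:regionName" => containerId map per replication spec,
  # e.g. { "AWS:US_EAST_1" = "61e0797dde08fb498ca11a71" }.
  value = data.mongodbatlas_advanced_cluster.example.replication_specs[*].container_id
}
```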
website/docs/d/advanced_clusters.html.markdown

Lines changed: 1 addition & 0 deletions
@@ -87,6 +87,7 @@ Key-value pairs that tag and categorize the cluster. Each key and value has a ma
 
 * `num_shards` - Provide this value if you set a `cluster_type` of SHARDED or GEOSHARDED.
 * `region_configs` - Configuration for the hardware specifications for nodes set for a given region. Each `region_configs` object describes the region's priority in elections and the number and type of MongoDB nodes that Atlas deploys to the region. Each `region_configs` object must have either an `analytics_specs` object, `electable_specs` object, or `read_only_specs` object. See [below](#region_configs).
+* `container_id` - A key-value map of the Network Peering Container ID(s) for the configuration specified in `region_configs`. The Container ID is the ID of the container either created programmatically by the user before any clusters existed in a project or when the first cluster in the region (AWS/Azure) or project (GCP) was created. The syntax is `"providerName:regionName" = "containerId"`, for example `"AWS:US_EAST_1" = "61e0797dde08fb498ca11a71"`.
 * `zone_name` - Name for the zone in a Global Cluster.

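Similarly, a hedged sketch for the plural data source, collecting the container IDs of every cluster in a project (labels and variables are assumptions):

```terraform
# Hypothetical example: map each cluster name to the container_id maps of its
# replication specs, using the results list of the plural data source.
data "mongodbatlas_advanced_clusters" "all" {
  project_id = var.project_id # assumed variable
}

output "container_ids_by_cluster" {
  value = {
    for c in data.mongodbatlas_advanced_clusters.all.results :
    c.name => c.replication_specs[*].container_id
  }
}
```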
website/docs/r/advanced_cluster.html.markdown

Lines changed: 3 additions & 0 deletions
@@ -14,6 +14,8 @@ More information on considerations for using advanced clusters please see [Consi
 
 -> **NOTE:** Groups and projects are synonymous terms. You may find group_id in the official documentation.
 
+-> **NOTE:** A network container is created for an advanced cluster to reside in if one does not yet exist in the project. To use this automatically created container with another resource, such as peering, the `container_id` is exported after creation.
+
 ## Example Usage
 
 
@@ -246,6 +248,7 @@ replication_specs {
 
 * `num_shards` - (Required) Provide this value if you set a `cluster_type` of SHARDED or GEOSHARDED. Omit this value if you selected a `cluster_type` of REPLICASET. This API resource accepts 1 through 50, inclusive. This parameter defaults to 1. If you specify a `num_shards` value of 1 and a `cluster_type` of SHARDED, Atlas deploys a single-shard [sharded cluster](https://docs.atlas.mongodb.com/reference/glossary/#std-term-sharded-cluster). Don't create a sharded cluster with a single shard for production environments. Single-shard sharded clusters don't provide the same benefits as multi-shard configurations.
 * `region_configs` - (Optional) Configuration for the hardware specifications for nodes set for a given region. Each `region_configs` object describes the region's priority in elections and the number and type of MongoDB nodes that Atlas deploys to the region. Each `region_configs` object must have either an `analytics_specs` object, `electable_specs` object, or `read_only_specs` object. See [below](#region_configs).
+* `container_id` - A key-value map of the Network Peering Container ID(s) for the configuration specified in `region_configs`. The Container ID is the ID of the container either created programmatically by the user before any clusters existed in a project or when the first cluster in the region (AWS/Azure) or project (GCP) was created. The syntax is `"providerName:regionName" = "containerId"`, for example `"AWS:US_EAST_1" = "61e0797dde08fb498ca11a71"`.
 * `zone_name` - (Optional) Name for the zone in a Global Cluster.

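To illustrate the NOTE above, the exported `container_id` could be fed into a peering connection roughly as follows; the resource labels, region key, and AWS-side values are placeholders rather than part of this commit.

```terraform
# Sketch only: reuse the container Atlas created for the advanced cluster when
# configuring AWS VPC peering. All identifiers below are placeholders.
locals {
  # Merge the per-replication-spec maps into one
  # "providerName:regionName" => containerId map.
  cluster_container_ids = merge(
    mongodbatlas_advanced_cluster.example.replication_specs[*].container_id...
  )
}

resource "mongodbatlas_network_peering" "example" {
  project_id             = var.project_id
  container_id           = local.cluster_container_ids["AWS:US_EAST_1"]
  provider_name          = "AWS"
  accepter_region_name   = "us-east-1"
  route_table_cidr_block = "10.0.0.0/16"
  vpc_id                 = var.vpc_id
  aws_account_id         = var.aws_account_id
}
```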