Commit 1e459ff
fix: adds data_process_region field to online archive resource (#1634)
* first commit to add the new fields.
* adds acceptance tests.
* fixes wrong name and adds migration test.
* fixes test expectations and updates documentation.
* changes to computed and optional to deal with migration.
* test.
* test.
* test.
* test.
* address comments.
* address comments.
* fix documentation.
* Update mongodbatlas/resource_mongodbatlas_online_archive.go
1 parent: 1f3fcd0 · commit: 1e459ff

6 files changed: +192 −1 lines changed

mongodbatlas/data_source_mongodbatlas_online_archive.go

Lines changed: 16 additions & 0 deletions
@@ -130,6 +130,22 @@ func schemaOnlineArchive() map[string]*schema.Schema {
 				},
 			},
 		},
+		"data_process_region": {
+			Type:     schema.TypeList,
+			Computed: true,
+			Elem: &schema.Resource{
+				Schema: map[string]*schema.Schema{
+					"cloud_provider": {
+						Type:     schema.TypeString,
+						Computed: true,
+					},
+					"region": {
+						Type:     schema.TypeString,
+						Computed: true,
+					},
+				},
+			},
+		},
 		"schedule": {
 			Type:     schema.TypeList,
 			Computed: true,

mongodbatlas/resource_mongodbatlas_online_archive.go

Lines changed: 58 additions & 1 deletion
@@ -112,6 +112,28 @@ func getMongoDBAtlasOnlineArchiveSchema() map[string]*schema.Schema {
 				},
 			},
 		},
+		"data_process_region": {
+			Type:       schema.TypeList,
+			MinItems:   1,
+			MaxItems:   1,
+			ConfigMode: schema.SchemaConfigModeAttr,
+			Optional:   true,
+			Computed:   true,
+			Elem: &schema.Resource{
+				Schema: map[string]*schema.Schema{
+					"region": {
+						Type:     schema.TypeString,
+						Optional: true,
+						Computed: true,
+					},
+					"cloud_provider": {
+						Type:     schema.TypeString,
+						Optional: true,
+						Computed: true,
+					},
+				},
+			},
+		},
 		"schedule": {
 			Type:     schema.TypeList,
 			Optional: true,
@@ -367,6 +389,7 @@ func mapToArchivePayload(d *schema.ResourceData) admin.BackupOnlineArchiveCreate

 	requestInput.Criteria = mapCriteria(d)
 	requestInput.DataExpirationRule = mapDataExpirationRule(d)
+	requestInput.DataProcessRegion = mapDataProcessRegion(d)
 	requestInput.Schedule = mapSchedule(d)

 	if partitions, ok := d.GetOk("partition_fields"); ok {
@@ -410,12 +433,13 @@ func resourceMongoDBAtlasOnlineArchiveUpdate(ctx context.Context, d *schema.Reso
 	pausedHasChange := d.HasChange("paused")
 	criteriaHasChange := d.HasChange("criteria")
 	dataExpirationRuleHasChange := d.HasChange("data_expiration_rule")
+	dataProcessRegionHasChange := d.HasChange("data_process_region")
 	scheduleHasChange := d.HasChange("schedule")

 	collectionTypeHasChange := d.HasChange("collection_type")

 	// nothing to do, let's go
-	if !pausedHasChange && !criteriaHasChange && !collectionTypeHasChange && !scheduleHasChange && !dataExpirationRuleHasChange {
+	if !pausedHasChange && !criteriaHasChange && !collectionTypeHasChange && !scheduleHasChange && !dataExpirationRuleHasChange && !dataProcessRegionHasChange {
 		return nil
 	}

@@ -441,6 +465,15 @@ func resourceMongoDBAtlasOnlineArchiveUpdate(ctx context.Context, d *schema.Reso
 		}
 	}

+	if dataProcessRegionHasChange {
+		newDataProcessRegion := mapDataProcessRegion(d)
+		if newDataProcessRegion == nil {
+			request.DataProcessRegion = &admin.DataProcessRegion{}
+		} else {
+			request.DataProcessRegion = newDataProcessRegion
+		}
+	}
+
 	if scheduleHasChange {
 		request.Schedule = mapSchedule(d)
 	}
@@ -525,6 +558,15 @@ func fromOnlineArchiveToMap(in *admin.BackupOnlineArchive) map[string]any {
 		schemaVals["data_expiration_rule"] = []any{dataExpirationRule}
 	}

+	var dataProcessRegion map[string]any
+	if in.DataProcessRegion != nil && (in.DataProcessRegion.CloudProvider != nil || in.DataProcessRegion.Region != nil) {
+		dataProcessRegion = map[string]any{
+			"cloud_provider": in.DataProcessRegion.CloudProvider,
+			"region":         in.DataProcessRegion.Region,
+		}
+		schemaVals["data_process_region"] = []any{dataProcessRegion}
+	}
+
 	// partitions fields
 	if len(in.PartitionFields) == 0 {
 		return schemaVals
@@ -557,6 +599,21 @@ func mapDataExpirationRule(d *schema.ResourceData) *admin.DataExpirationRule {
 	return nil
 }

+func mapDataProcessRegion(d *schema.ResourceData) *admin.DataProcessRegion {
+	if dataProcessRegions, ok := d.GetOk("data_process_region"); ok && len(dataProcessRegions.([]any)) > 0 {
+		dataProcessRegion := dataProcessRegions.([]any)[0].(map[string]any)
+		result := admin.DataProcessRegion{}
+		if cloudProvider, ok := dataProcessRegion["cloud_provider"]; ok {
+			result.CloudProvider = pointy.String(cloudProvider.(string))
+		}
+		if region, ok := dataProcessRegion["region"]; ok {
+			result.Region = pointy.String(region.(string))
+		}
+		return &result
+	}
+	return nil
+}
+
 func mapCriteria(d *schema.ResourceData) admin.Criteria {
 	criteriaList := d.Get("criteria").([]any)


mongodbatlas/resource_mongodbatlas_online_archive_test.go

Lines changed: 103 additions & 0 deletions
@@ -5,6 +5,7 @@ import (
 	"fmt"
 	"log"
 	"os"
+	"regexp"
 	"testing"
 	"time"

@@ -165,6 +166,55 @@ func TestAccBackupRSOnlineArchiveBasic(t *testing.T) {
 	})
 }

+func TestAccBackupRSOnlineArchiveWithProcessRegion(t *testing.T) {
+	var (
+		cluster                     matlas.Cluster
+		resourceName                = "mongodbatlas_cluster.online_archive_test"
+		onlineArchiveResourceName   = "mongodbatlas_online_archive.users_archive"
+		onlineArchiveDataSourceName = "data.mongodbatlas_online_archive.read_archive"
+		orgID                       = os.Getenv("MONGODB_ATLAS_ORG_ID")
+		projectName                 = acctest.RandomWithPrefix("test-acc")
+		name                        = fmt.Sprintf("test-acc-%s", acctest.RandString(10))
+		cloudProvider               = "AWS"
+	)
+
+	resource.ParallelTest(t, resource.TestCase{
+		PreCheck:                 func() { testAccPreCheckBasic(t) },
+		ProtoV6ProviderFactories: testAccProviderV6Factories,
+		CheckDestroy:             testAccCheckMongoDBAtlasClusterDestroy,
+		Steps: []resource.TestStep{
+			{
+				// We need this step to populate the cluster with Sample Data
+				// The online archive won't work if the cluster does not have data
+				Config: testAccBackupRSOnlineArchiveConfigFirstStep(orgID, projectName, name),
+				Check: resource.ComposeTestCheckFunc(
+					populateWithSampleData(resourceName, &cluster),
+				),
+			},
+			{
+				Config: testAccBackupRSOnlineArchiveConfigWithProcessRegion(orgID, projectName, name, cloudProvider, "SA_EAST_1"),
+				Check: resource.ComposeTestCheckFunc(
+					resource.TestCheckResourceAttr(onlineArchiveResourceName, "data_process_region.0.cloud_provider", "AWS"),
+					resource.TestCheckResourceAttr(onlineArchiveResourceName, "data_process_region.0.region", "SA_EAST_1"),
+					resource.TestCheckResourceAttr(onlineArchiveDataSourceName, "data_process_region.0.cloud_provider", "AWS"),
+					resource.TestCheckResourceAttr(onlineArchiveDataSourceName, "data_process_region.0.region", "SA_EAST_1"),
+				),
+			},
+			{
+				Config:      testAccBackupRSOnlineArchiveConfigWithProcessRegion(orgID, projectName, name, cloudProvider, "AP_SOUTH_1"),
+				ExpectError: regexp.MustCompile("ONLINE_ARCHIVE_CANNOT_MODIFY_FIELD"),
+			},
+			{
+				Config: testAccBackupRSOnlineArchiveConfigWithoutSchedule(orgID, projectName, name),
+				Check: resource.ComposeTestCheckFunc(
+					resource.TestCheckResourceAttr(onlineArchiveResourceName, "data_process_region.0.cloud_provider", "AWS"),
+					resource.TestCheckResourceAttr(onlineArchiveResourceName, "data_process_region.0.region", "SA_EAST_1"),
+				),
+			},
+		},
+	})
+}
+
 func populateWithSampleData(resourceName string, cluster *matlas.Cluster) resource.TestCheckFunc {
 	return func(s *terraform.State) error {
 		conn := testMongoDBClient.(*MongoDBClient).Atlas
@@ -331,6 +381,59 @@ func testAccBackupRSOnlineArchiveConfigWithoutSchedule(orgID, projectName, clust
 `, testAccBackupRSOnlineArchiveConfigFirstStep(orgID, projectName, clusterName))
 }

+func testAccBackupRSOnlineArchiveConfigWithProcessRegion(orgID, projectName, clusterName, cloudProvider, region string) string {
+	return fmt.Sprintf(`
+	%s
+	resource "mongodbatlas_online_archive" "users_archive" {
+		project_id      = mongodbatlas_cluster.online_archive_test.project_id
+		cluster_name    = mongodbatlas_cluster.online_archive_test.name
+		coll_name       = "listingsAndReviews"
+		collection_type = "STANDARD"
+		db_name         = "sample_airbnb"

+		criteria {
+			type              = "DATE"
+			date_field        = "last_review"
+			date_format       = "ISODATE"
+			expire_after_days = 2
+		}

+		partition_fields {
+			field_name = "last_review"
+			order      = 0
+		}

+		partition_fields {
+			field_name = "maximum_nights"
+			order      = 1
+		}

+		partition_fields {
+			field_name = "name"
+			order      = 2
+		}

+		data_process_region {
+			cloud_provider = %[2]q
+			region         = %[3]q
+		}

+		sync_creation = true
+	}

+	data "mongodbatlas_online_archive" "read_archive" {
+		project_id   = mongodbatlas_online_archive.users_archive.project_id
+		cluster_name = mongodbatlas_online_archive.users_archive.cluster_name
+		archive_id   = mongodbatlas_online_archive.users_archive.archive_id
+	}

+	data "mongodbatlas_online_archives" "all" {
+		project_id   = mongodbatlas_online_archive.users_archive.project_id
+		cluster_name = mongodbatlas_online_archive.users_archive.cluster_name
+	}
+`, testAccBackupRSOnlineArchiveConfigFirstStep(orgID, projectName, clusterName), cloudProvider, region)
+}
+
 func testAccBackupRSOnlineArchiveConfigFirstStep(orgID, projectName, clusterName string) string {
 	return fmt.Sprintf(`
 resource "mongodbatlas_project" "cluster_project" {

website/docs/d/online_archive.html.markdown

Lines changed: 5 additions & 0 deletions
@@ -36,6 +36,7 @@ data "mongodbatlas_online_archive" "test" {
 * `collection_type` - Type of MongoDB collection that you want to return. This value can be "TIMESERIES" or "STANDARD". Default is "STANDARD".
 * `criteria` - Criteria to use for archiving data. See [criteria](#criteria).
 * `data_expiration_rule` - Rule for specifying when data should be deleted from the archive. See [data expiration rule](#data-expiration-rule).
+* `data_process_region` - Settings to configure the region where you wish to store your archived data. See [data process region](#data-process-region).
 * `schedule` - Regular frequency and duration when archiving process occurs. See [schedule](#schedule).
 * `partition_fields` - Fields to use to partition data. You can specify up to two frequently queried fields to use for partitioning data. Queries that don’t contain the specified fields require a full collection scan of all archived documents, which takes longer and increases your costs. To learn more about how partition improves query performance, see [Data Structure in S3](https://docs.mongodb.com/datalake/admin/optimize-query-performance/#data-structure-in-s3). The value of a partition field can be up to a maximum of 700 characters. Documents with values exceeding 700 characters are not archived. See [partition fields](#partition).
 * `paused` - State of the online archive. This is required for pausing an active online archive or resuming a paused online archive. If the collection has another active online archive, the resume request fails.
@@ -51,6 +52,10 @@ data "mongodbatlas_online_archive" "test" {
 ### Data Expiration Rule
 * `expire_after_days` - Number of days used in the date criteria for nominating documents for deletion. Value must be between 7 and 9215.

+### Data Process Region
+* `cloud_provider` - Human-readable label that identifies the Cloud service provider where you wish to store your archived data.
+* `region` - Human-readable label that identifies the geographic location of the region where you wish to store your archived data. For allowed values, see [MongoDB Atlas API documentation](https://www.mongodb.com/docs/atlas/reference/api-resources-spec/v2/#tag/Online-Archive/operation/createOnlineArchive)
+

 ### Schedule

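For illustration, a minimal sketch of reading the new attribute through this data source; the `var.*` values are placeholders, not values taken from this commit:

variable "project_id" {}
variable "cluster_name" {}
variable "archive_id" {}

data "mongodbatlas_online_archive" "read_archive" {
  project_id   = var.project_id
  cluster_name = var.cluster_name
  archive_id   = var.archive_id
}

# data_process_region is a computed single-element list; per this change it is
# only populated when the archive was created with a data process region.
output "archive_data_process_region" {
  value = data.mongodbatlas_online_archive.read_archive.data_process_region
}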

website/docs/d/online_archives.html.markdown

Lines changed: 5 additions & 0 deletions
@@ -41,6 +41,7 @@ In addition to all arguments above, the following attributes are exported:
 * `collection_type` - Type of MongoDB collection that you want to return. This value can be "TIMESERIES" or "STANDARD". Default is "STANDARD".
 * `criteria` - Criteria to use for archiving data. See [criteria](#criteria).
 * `data_expiration_rule` - Rule for specifying when data should be deleted from the archive. See [data expiration rule](#data-expiration-rule).
+* `data_process_region` - Settings to configure the region where you wish to store your archived data. See [data process region](#data-process-region).
 * `schedule` - Regular frequency and duration when archiving process occurs. See [schedule](#schedule).
 * `partition_fields` - Fields to use to partition data. You can specify up to two frequently queried fields to use for partitioning data. Queries that don’t contain the specified fields require a full collection scan of all archived documents, which takes longer and increases your costs. To learn more about how partition improves query performance, see [Data Structure in S3](https://docs.mongodb.com/datalake/admin/optimize-query-performance/#data-structure-in-s3). The value of a partition field can be up to a maximum of 700 characters. Documents with values exceeding 700 characters are not archived. See [partition fields](#partition).
 * `paused` - State of the online archive. This is required for pausing an active online archive or resuming a paused online archive. If the collection has another active online archive, the resume request fails.
@@ -56,6 +57,10 @@ In addition to all arguments above, the following attributes are exported:
 ### Data Expiration Rule
 * `expire_after_days` - Number of days used in the date criteria for nominating documents for deletion. Value must be between 7 and 9215.

+### Data Process Region
+* `cloud_provider` - Human-readable label that identifies the Cloud service provider where you wish to store your archived data.
+* `region` - Human-readable label that identifies the geographic location of the region where you wish to store your archived data. For allowed values, see [MongoDB Atlas API documentation](https://www.mongodb.com/docs/atlas/reference/api-resources-spec/v2/#tag/Online-Archive/operation/createOnlineArchive)
+
 ### Schedule

 * `type` - Type of schedule (`DAILY`, `MONTHLY`, `WEEKLY`).
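For illustration, a hedged sketch of reading the attribute from the plural data source; the `results` attribute name is assumed from the provider's usual convention for plural data sources and is not shown in this diff, so adjust it if the schema differs:

variable "project_id" {}
variable "cluster_name" {}

data "mongodbatlas_online_archives" "all" {
  project_id   = var.project_id
  cluster_name = var.cluster_name
}

# Assumption: each entry under `results` exposes the same attributes as the
# singular data source, including the new data_process_region list.
output "archive_process_regions" {
  value = [for archive in data.mongodbatlas_online_archives.all.results : archive.data_process_region]
}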

website/docs/r/online_archive.html.markdown

Lines changed: 5 additions & 0 deletions
@@ -86,6 +86,7 @@ resource "mongodbatlas_online_archive" "test" {
 * `collection_type` - Type of MongoDB collection that you want to return. This value can be "TIMESERIES" or "STANDARD". Default is "STANDARD".
 * `criteria` - (Required) Criteria to use for archiving data. See [criteria](#criteria).
 * `data_expiration_rule` - (Optional) Rule for specifying when data should be deleted from the archive. See [data expiration rule](#data-expiration-rule).
+* `data_process_region` - (Optional) Settings to configure the region where you wish to store your archived data. This field is immutable and cannot be updated. See [data process region](#data-process-region).
 * `schedule` - Regular frequency and duration when archiving process occurs. See [schedule](#schedule).
 * `partition_fields` - (Recommended) Fields to use to partition data. You can specify up to two frequently queried fields to use for partitioning data. Queries that don’t contain the specified fields require a full collection scan of all archived documents, which takes longer and increases your costs. To learn more about how partition improves query performance, see [Data Structure in S3](https://docs.mongodb.com/datalake/admin/optimize-query-performance/#data-structure-in-s3). The value of a partition field can be up to a maximum of 700 characters. Documents with values exceeding 700 characters are not archived. See [partition fields](#partition).
 * `paused` - (Optional) State of the online archive. This is required for pausing an active online archive or resuming a paused online archive. If the collection has another active online archive, the resume request fails.
@@ -113,6 +114,10 @@ The only field required for criteria type `CUSTOM`

 * `expire_after_days` - Number of days used in the date criteria for nominating documents for deletion. Value must be between 7 and 9215.

+### Data Process Region
+* `cloud_provider` - Human-readable label that identifies the Cloud service provider where you wish to store your archived data.
+* `region` - Human-readable label that identifies the geographic location of the region where you wish to store your archived data. For allowed values, see [MongoDB Atlas API documentation](https://www.mongodb.com/docs/atlas/reference/api-resources-spec/v2/#tag/Online-Archive/operation/createOnlineArchive)
+
 ### Schedule

 * `type` - Type of schedule (`DAILY`, `MONTHLY`, `WEEKLY`).
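For illustration, a minimal resource configuration using the new block, adapted from the acceptance-test config in this commit; `var.project_id` and `var.cluster_name` are placeholders:

variable "project_id" {}
variable "cluster_name" {}

resource "mongodbatlas_online_archive" "users_archive" {
  project_id      = var.project_id
  cluster_name    = var.cluster_name
  db_name         = "sample_airbnb"
  coll_name       = "listingsAndReviews"
  collection_type = "STANDARD"

  criteria {
    type              = "DATE"
    date_field        = "last_review"
    date_format       = "ISODATE"
    expire_after_days = 2
  }

  # Optional and immutable once created: pins the cloud provider and region
  # where archived data is stored. The acceptance test expects
  # ONLINE_ARCHIVE_CANNOT_MODIFY_FIELD if the region is changed afterwards.
  data_process_region {
    cloud_provider = "AWS"
    region         = "SA_EAST_1"
  }
}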
