Skip to content

Commit f9f644d

Browse files
authored
chore: Addresses "todo" comments from code (#3151)
* resource schema
* test
* include_deleted_with_retained_backups
* adv_config
* use_replication_spec_per_shard
1 parent 8c9d6c8 commit f9f644d

File tree

3 files changed

+23
-29
lines changed

3 files changed

+23
-29
lines changed

internal/service/advancedclustertpf/plural_data_source.go

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -66,9 +66,8 @@ func (d *pluralDS) readClusters(ctx context.Context, diags *diag.Diagnostics, pl
6666
return nil, diags
6767
}
6868
outs := &TFModelPluralDS{
69-
ProjectID: pluralModel.ProjectID,
70-
UseReplicationSpecPerShard: pluralModel.UseReplicationSpecPerShard,
71-
IncludeDeletedWithRetainedBackups: pluralModel.IncludeDeletedWithRetainedBackups,
69+
ProjectID: pluralModel.ProjectID,
70+
UseReplicationSpecPerShard: pluralModel.UseReplicationSpecPerShard,
7271
}
7372
for i := range list {
7473
clusterResp := &list[i]

internal/service/advancedclustertpf/schema.go

Lines changed: 19 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -208,18 +208,18 @@ func resourceSchema(ctx context.Context) schema.Schema {
208208
"container_id": schema.MapAttribute{
209209
ElementType: types.StringType,
210210
Computed: true,
211-
MarkdownDescription: "container_id", // TODO: add description
211+
MarkdownDescription: "A key-value map of the Network Peering Container ID(s) for the configuration specified in region_configs. The Container ID is the id of the container created when the first cluster in the region (AWS/Azure) or project (GCP) was created.",
212212
},
213213
"external_id": schema.StringAttribute{
214214
Computed: true,
215-
MarkdownDescription: "external_id", // TODO: add description
215+
MarkdownDescription: "Unique 24-hexadecimal digit string that identifies the replication object for a shard in a Cluster. This value corresponds to Shard ID displayed in the UI.",
216216
},
217217
"num_shards": schema.Int64Attribute{
218218
DeprecationMessage: deprecationMsgOldSchema("num_shards"),
219219
Default: int64default.StaticInt64(1),
220220
Computed: true,
221221
Optional: true,
222-
MarkdownDescription: "num_shards", // TODO: add description
222+
MarkdownDescription: "Number of shards up to 50 to deploy for a sharded cluster.",
223223
},
224224
"region_configs": schema.ListNestedAttribute{
225225
Required: true,
@@ -339,29 +339,26 @@ func pluralDataSourceSchema(ctx context.Context) dsschema.Schema {
339339
return conversion.PluralDataSourceSchemaFromResource(resourceSchema(ctx), &conversion.PluralDataSourceSchemaRequest{
340340
RequiredFields: []string{"project_id"},
341341
OverridenRootFields: map[string]dsschema.Attribute{
342-
"use_replication_spec_per_shard": dsschema.BoolAttribute{ // TODO: added as in current resource
343-
Optional: true,
344-
MarkdownDescription: "use_replication_spec_per_shard", // TODO: add documentation
345-
},
346-
"include_deleted_with_retained_backups": dsschema.BoolAttribute{ // TODO: not in current resource, decide if keep
347-
Optional: true,
348-
MarkdownDescription: "Flag that indicates whether to return Clusters with retain backups.",
349-
},
342+
"use_replication_spec_per_shard": useReplicationSpecPerShardSchema(),
350343
},
351344
OverridenFields: dataSourceOverridenFields(),
352345
})
353346
}
354347

355348
func dataSourceOverridenFields() map[string]dsschema.Attribute {
356349
return map[string]dsschema.Attribute{
357-
"use_replication_spec_per_shard": dsschema.BoolAttribute{ // TODO: added as in current resource
358-
Optional: true,
359-
MarkdownDescription: "use_replication_spec_per_shard", // TODO: add documentation
360-
},
350+
"use_replication_spec_per_shard": useReplicationSpecPerShardSchema(),
361351
"accept_data_risks_and_force_replica_set_reconfig": nil,
362352
}
363353
}
364354

355+
func useReplicationSpecPerShardSchema() dsschema.BoolAttribute {
356+
return dsschema.BoolAttribute{
357+
Optional: true,
358+
MarkdownDescription: "Set this field to true to allow the data source to use the latest schema representing each shard with an individual replication_specs object. This enables representing clusters with independent shard scaling.",
359+
}
360+
}
361+
365362
func AutoScalingSchema() schema.SingleNestedAttribute {
366363
return schema.SingleNestedAttribute{
367364
Computed: true,
@@ -439,7 +436,7 @@ func AdvancedConfigurationSchema(ctx context.Context) schema.SingleNestedAttribu
439436
return schema.SingleNestedAttribute{
440437
Computed: true,
441438
Optional: true,
442-
MarkdownDescription: "advanced_configuration", // TODO: add description
439+
MarkdownDescription: "Additional settings for an Atlas cluster.",
443440
// Avoid adding optional-only attributes, if the block is removed and attributes are not null in the state we get unintentional plan changes after apply.
444441
// Avoid computed-optional with Default, if the block is removed and the attribute Default != state value we get unintentional plan changes after apply.
445442
Attributes: map[string]schema.Attribute{
@@ -501,17 +498,16 @@ func AdvancedConfigurationSchema(ctx context.Context) schema.SingleNestedAttribu
501498
MarkdownDescription: "Lifetime, in seconds, of multi-document transactions. Atlas considers the transactions that exceed this limit as expired and so aborts them through a periodic cleanup process.",
502499
},
503500
"default_read_concern": schema.StringAttribute{
504-
DeprecationMessage: deprecationMsgOldSchema("default_read_concern"),
505-
501+
DeprecationMessage: deprecationMsgOldSchema("default_read_concern"),
506502
Computed: true,
507503
Optional: true,
508-
MarkdownDescription: "default_read_concern", // TODO: add description
504+
MarkdownDescription: "Default level of acknowledgment requested from MongoDB for read operations set for this cluster.",
509505
},
510506
"fail_index_key_too_long": schema.BoolAttribute{
511507
DeprecationMessage: deprecationMsgOldSchema("fail_index_key_too_long"),
512508
Computed: true,
513509
Optional: true,
514-
MarkdownDescription: "fail_index_key_too_long", // TODO: add description
510+
MarkdownDescription: "When true, documents can only be updated or inserted if, for all indexed fields on the target collection, the corresponding index entries do not exceed 1024 bytes. When false, mongod writes documents that exceed the limit but does not index them.",
515511
},
516512
"default_max_time_ms": schema.Int64Attribute{
517513
Computed: true,
@@ -605,10 +601,9 @@ type TFModelDS struct {
605601
}
606602

607603
type TFModelPluralDS struct {
608-
ProjectID types.String `tfsdk:"project_id"`
609-
Results []*TFModelDS `tfsdk:"results"`
610-
UseReplicationSpecPerShard types.Bool `tfsdk:"use_replication_spec_per_shard"` // TODO: added as in current resource
611-
IncludeDeletedWithRetainedBackups types.Bool `tfsdk:"include_deleted_with_retained_backups"` // TODO: not in current resource, decide if keep
604+
ProjectID types.String `tfsdk:"project_id"`
605+
Results []*TFModelDS `tfsdk:"results"`
606+
UseReplicationSpecPerShard types.Bool `tfsdk:"use_replication_spec_per_shard"`
612607
}
613608

614609
type TFBiConnectorModel struct {

internal/testutil/acc/advanced_cluster_preview_provider_v2.go

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -137,7 +137,7 @@ func convertAttrs(t *testing.T, name string, writeBody *hclwrite.Body, isList bo
137137
break
138138
}
139139
vals = append(vals, getOneAttr(t, hcl.GetBlockBody(t, match)))
140-
writeBody.RemoveBlock(match) // TODO: RemoveBlock doesn't remove newline just after the block so an extra line is added
140+
writeBody.RemoveBlock(match) // RemoveBlock doesn't remove newline just after the block so an extra line is added
141141
}
142142
if len(vals) == 0 {
143143
return
@@ -162,7 +162,7 @@ func convertKeyValueAttrs(t *testing.T, name string, writeBody *hclwrite.Body) {
162162
key := attrs.GetAttr("key")
163163
value := attrs.GetAttr("value")
164164
vals[key.AsString()] = value
165-
writeBody.RemoveBlock(match) // TODO: RemoveBlock doesn't remove newline just after the block so an extra line is added
165+
writeBody.RemoveBlock(match) // RemoveBlock doesn't remove newline just after the block so an extra line is added
166166
}
167167
if len(vals) > 0 {
168168
writeBody.SetAttributeValue(name, cty.ObjectVal(vals))

0 commit comments

Comments (0)