Commit 3ef174b

allow numerical and variable num_shards in clu2adv
1 parent 4609386 commit 3ef174b

6 files changed: +324 -58 lines changed

README.md

Lines changed: 0 additions & 1 deletion
@@ -120,7 +120,6 @@ dynamic "replication_specs" {
 
 ### Limitations
 
-- [`num_shards`](https://registry.terraform.io/providers/mongodb/mongodbatlas/latest/docs/resources/cluster#num_shards-2) in `replication_specs` must be a numeric [literal expression](https://developer.hashicorp.com/nomad/docs/job-specification/hcl2/expressions#literal-expressions), e.g. `var.num_shards` is not supported. This is to allow creating a `replication_specs` element per shard in `mongodbatlas_advanced_cluster`. This limitation doesn't apply if you're using `dynamic` blocks in `regions_config` or `replication_specs`.
 - `dynamic` blocks are supported with some [limitations](./docs/guide_clu2adv_dynamic_block.md).
 
 ## Feedback

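This commit implements exactly the limitation removed above: `num_shards` in `replication_specs` no longer has to be a numeric literal. As a hedged sketch (the input mirrors the `variable_num_shards` test case added further down; the body of the generated object comes from `fillRegionConfigs` and is only indicated here), a variable `num_shards` is now turned into a `for`/`range` expression on the `mongodbatlas_advanced_cluster` side:

```hcl
# Input that clu2adv previously rejected (non-literal num_shards); it matches
# the new "variable_num_shards" test case later in this commit.
resource "mongodbatlas_cluster" "variable_num_shards" {
  project_id                  = var.project_id
  name                        = "multirep"
  cluster_type                = "GEOSHARDED"
  provider_name               = "AWS"
  provider_instance_size_name = "M10"
  replication_specs {
    zone_name  = "Zone 1"
    num_shards = var.num_shards
    regions_config {
      region_name     = "US_EAST_1"
      electable_nodes = 3
      priority        = 7
    }
  }
}

# Rough shape of the replication_specs expression the converter now emits for
# the advanced cluster (a single spec, so no concat() wrapper is needed; the
# object body is illustrative, not the converter's literal output):
#
#   replication_specs = [
#     for i in range(var.num_shards) : {
#       zone_name = "Zone 1"
#       # ...region configs mapped by fillRegionConfigs...
#     }
#   ]
```
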
internal/convert/clu2adv.go

Lines changed: 121 additions & 32 deletions
@@ -181,46 +181,135 @@ func fillReplicationSpecs(resourceb *hclwrite.Body, root attrVals) error {
 		resourceb.SetAttributeRaw(nRepSpecs, d.tokens)
 		return nil
 	}
-	// at least one replication_specs exists here, if not it would be a free tier cluster
-	var specbs []*hclwrite.Body
+
+	// Collect all replication_specs blocks first
+	var repSpecBlocks []*hclwrite.Block
 	for {
-		var (
-			specSrc = resourceb.FirstMatchingBlock(nRepSpecs, nil)
-			spec    = hclwrite.NewEmptyFile()
-			specb   = spec.Body()
-		)
-		if specSrc == nil {
+		block := resourceb.FirstMatchingBlock(nRepSpecs, nil)
+		if block == nil {
 			break
 		}
-		specbSrc := specSrc.Body()
-		d, err := fillWithDynamicRegionConfigs(specbSrc, root, false)
-		if err != nil {
-			return err
-		}
-		if d.IsPresent() {
-			resourceb.RemoveBlock(specSrc)
-			resourceb.SetAttributeRaw(nRepSpecs, d.tokens)
-			return nil
-		}
-		// ok to fail as zone_name is optional
-		_ = hcl.MoveAttr(specbSrc, specb, nZoneName, nZoneName, errRepSpecs)
-		shards := specbSrc.GetAttribute(nNumShards)
-		if shards == nil {
-			return fmt.Errorf("%s: %s not found", errRepSpecs, nNumShards)
+		resourceb.RemoveBlock(block)
+		repSpecBlocks = append(repSpecBlocks, block)
+	}
+
+	if len(repSpecBlocks) == 0 {
+		return fmt.Errorf("%s: no replication_specs found", errRepSpecs)
+	}
+
+	// Check if any replication_specs has a variable num_shards
+	hasVariableNumShards := false
+	for _, block := range repSpecBlocks {
+		shardsAttr := block.Body().GetAttribute(nNumShards)
+		if shardsAttr != nil {
+			_, err := hcl.GetAttrInt(shardsAttr, errNumShards)
+			if err != nil {
+				hasVariableNumShards = true
+				break
+			}
 		}
-		shardsVal, err := hcl.GetAttrInt(shards, errNumShards)
-		if err != nil {
-			return err
+	}
+
+	// If we have any variable num_shards, we need to use concat
+	if hasVariableNumShards {
+		var concatParts []hclwrite.Tokens
+
+		for _, block := range repSpecBlocks {
+			spec := hclwrite.NewEmptyFile()
+			specb := spec.Body()
+			specbSrc := block.Body()
+
+			// Check for dynamic region configs
+			d, err := fillWithDynamicRegionConfigs(specbSrc, root, false)
+			if err != nil {
+				return err
+			}
+			if d.IsPresent() {
+				concatParts = append(concatParts, d.tokens)
+				continue
+			}
+
+			// Handle zone_name
+			_ = hcl.MoveAttr(specbSrc, specb, nZoneName, nZoneName, errRepSpecs)
+
+			// Handle num_shards
+			shardsAttr := specbSrc.GetAttribute(nNumShards)
+			if shardsAttr == nil {
+				return fmt.Errorf("%s: %s not found", errRepSpecs, nNumShards)
+			}
+
+			shardsVal, err := hcl.GetAttrInt(shardsAttr, errNumShards)
+
+			if errConfig := fillRegionConfigs(specb, specbSrc, root); errConfig != nil {
+				return errConfig
+			}
+
+			if err != nil {
+				// num_shards is a variable/expression
+				shardsExpr := hcl.GetAttrExpr(shardsAttr)
+				forExpr := fmt.Sprintf("for i in range(%s) :", shardsExpr)
+				tokens := hcl.TokensFromExpr(forExpr)
+				tokens = append(tokens, hcl.TokensObject(specb)...)
+				concatParts = append(concatParts, hcl.EncloseBracketsNewLines(tokens))
+			} else {
+				// num_shards is a literal number - create explicit array
+				var specs []*hclwrite.Body
+				for range shardsVal {
+					specs = append(specs, specb)
+				}
+				concatParts = append(concatParts, hcl.TokensArray(specs))
+			}
 		}
-		if err := fillRegionConfigs(specb, specbSrc, root); err != nil {
-			return err
+
+		// Use concat to combine all parts
+		if len(concatParts) > 1 {
+			resourceb.SetAttributeRaw(nRepSpecs, hcl.TokensFuncConcat(concatParts...))
+		} else {
+			resourceb.SetAttributeRaw(nRepSpecs, concatParts[0])
 		}
-		for range shardsVal {
-			specbs = append(specbs, specb)
+	} else {
+		// All num_shards are numeric, use simple array
+		var specbs []*hclwrite.Body
+		for _, block := range repSpecBlocks {
+			spec := hclwrite.NewEmptyFile()
+			specb := spec.Body()
+			specbSrc := block.Body()
+
+			// Check for dynamic region configs
+			d, err := fillWithDynamicRegionConfigs(specbSrc, root, false)
+			if err != nil {
+				return err
+			}
+			if d.IsPresent() {
+				// For dynamic blocks that have numerical num_shards
+				// Extract the tokens and add to array
+				// This is complex, for now just return the dynamic block as is
+				resourceb.SetAttributeRaw(nRepSpecs, d.tokens)
+				return nil
+			}
+
+			// Handle zone_name
+			_ = hcl.MoveAttr(specbSrc, specb, nZoneName, nZoneName, errRepSpecs)
+
+			// Handle num_shards
+			shardsAttr := specbSrc.GetAttribute(nNumShards)
+			if shardsAttr == nil {
+				return fmt.Errorf("%s: %s not found", errRepSpecs, nNumShards)
+			}
+
+			shardsVal, _ := hcl.GetAttrInt(shardsAttr, errNumShards)
+
+			if err := fillRegionConfigs(specb, specbSrc, root); err != nil {
+				return err
+			}
+
+			for range shardsVal {
+				specbs = append(specbs, specb)
+			}
 		}
-		resourceb.RemoveBlock(specSrc)
+		resourceb.SetAttributeRaw(nRepSpecs, hcl.TokensArray(specbs))
 	}
-	resourceb.SetAttributeRaw(nRepSpecs, hcl.TokensArray(specbs))
+
 	return nil
 }

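To summarize the new `fillReplicationSpecs` flow: all `replication_specs` blocks are collected first; if every `num_shards` is a literal, each spec body is repeated `num_shards` times in a plain list (`hcl.TokensArray`), and as soon as any `num_shards` is a variable or expression, the per-spec parts are combined with `concat()`, using a bracketed `for i in range(...)` expression for variable specs and a literal tuple for numeric ones. Below is a hedged sketch of that mixed case (literal `num_shards = 2` in Zone 1 plus `var.num_shards_rep2` in Zone 2, as in the `mix_variable_numerical_num_shards` test input further down); the object bodies are placeholders rather than the converter's exact output:

```hcl
replication_specs = concat(
  [
    # Zone 1: literal num_shards = 2, so the spec object is repeated twice.
    {
      zone_name = "Zone 1"
      # ...region configs mapped by fillRegionConfigs...
    },
    {
      zone_name = "Zone 1"
      # ...region configs mapped by fillRegionConfigs...
    },
  ],
  [
    # Zone 2: variable num_shards, so one object per generated index.
    for i in range(var.num_shards_rep2) : {
      zone_name = "Zone 2"
      # ...region configs mapped by fillRegionConfigs...
    }
  ]
)
```
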
internal/convert/testdata/clu2adv/errors.json

Lines changed: 1 addition & 2 deletions
@@ -4,6 +4,5 @@
   "free_cluster_missing_attribute": "free cluster (because no replication_specs): attribute backing_provider_name not found",
   "regions_config_missing_priority": "setting replication_specs: attribute priority not found",
   "replication_specs_missing_num_shards": "num_shards not found",
-  "replication_specs_missing_regions_config": "setting replication_specs: regions_config not found",
-  "replication_specs_non_literal_num_shards": "setting num_shards: failed to evaluate number"
+  "replication_specs_missing_regions_config": "setting replication_specs: regions_config not found"
 }

internal/convert/testdata/clu2adv/multi_replication_specs.in.tf

Lines changed: 72 additions & 2 deletions
@@ -1,4 +1,4 @@
-resource "mongodbatlas_cluster" "multirep" {
+resource "mongodbatlas_cluster" "basic" {
   project_id   = var.project_id
   name         = "multirep"
   disk_size_gb = 80
@@ -27,7 +27,7 @@ resource "mongodbatlas_cluster" "multirep" {
   }
 }
 
-resource "mongodbatlas_cluster" "geo" {
+resource "mongodbatlas_cluster" "multiple_numerical_num_shards" {
   project_id   = "1234"
   name         = "geo"
   disk_size_gb = 80
@@ -57,3 +57,73 @@ resource "mongodbatlas_cluster" "geo" {
     }
   }
 }
+
+resource "mongodbatlas_cluster" "variable_num_shards" {
+  project_id                  = var.project_id
+  name                        = "multirep"
+  cluster_type                = "GEOSHARDED"
+  provider_name               = "AWS"
+  provider_instance_size_name = "M10"
+  replication_specs {
+    zone_name  = "Zone 1"
+    num_shards = var.num_shards
+    regions_config {
+      region_name     = "US_EAST_1"
+      electable_nodes = 3
+      priority        = 7
+    }
+  }
+}
+
+resource "mongodbatlas_cluster" "multiple_variable_num_shards" {
+  project_id                  = var.project_id
+  name                        = "multirep"
+  cluster_type                = "GEOSHARDED"
+  provider_name               = "AWS"
+  provider_instance_size_name = "M10"
+  replication_specs {
+    zone_name  = "Zone 1"
+    num_shards = var.num_shards_rep1
+    regions_config {
+      region_name     = "US_EAST_1"
+      electable_nodes = 3
+      priority        = 7
+    }
+  }
+  replication_specs {
+    zone_name  = "Zone 2"
+    num_shards = var.num_shards_rep2
+    regions_config {
+      region_name     = "US_WEST_2"
+      electable_nodes = 3
+      priority        = 7
+    }
+  }
+}
+
+resource "mongodbatlas_cluster" "mix_variable_numerical_num_shards" {
+  project_id                  = var.project_id
+  name                        = "multirep"
+  cluster_type                = "GEOSHARDED"
+  provider_name               = "AWS"
+  provider_instance_size_name = "M10"
+  disk_size_gb                = 80
+  replication_specs {
+    zone_name  = "Zone 1"
+    num_shards = 2
+    regions_config {
+      region_name     = "US_EAST_1"
+      electable_nodes = 3
+      priority        = 7
+    }
+  }
+  replication_specs {
+    zone_name  = "Zone 2"
+    num_shards = var.num_shards_rep2
+    regions_config {
+      region_name     = "US_WEST_2"
+      electable_nodes = 3
+      priority        = 7
+    }
+  }
+}

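The expected conversions for these new inputs live in the matching golden file (presumably a `.out.tf` alongside this `.in.tf`, not shown in this excerpt). Based on the `fillReplicationSpecs` changes above, the `multiple_variable_num_shards` case should translate to roughly the following shape (hedged sketch; object bodies are placeholders, not the tool's literal output):

```hcl
replication_specs = concat(
  [
    for i in range(var.num_shards_rep1) : {
      zone_name = "Zone 1"
      # ...region configs for US_EAST_1 mapped by fillRegionConfigs...
    }
  ],
  [
    for i in range(var.num_shards_rep2) : {
      zone_name = "Zone 2"
      # ...region configs for US_WEST_2 mapped by fillRegionConfigs...
    }
  ]
)
```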