
Commit 615b98a

Bigtable: add NodeScalingFactor support for Clusters (#13391) (#22560)
[upstream:719c26481c64771332504bcde8ab5feac86a26d2] Signed-off-by: Modular Magician <[email protected]>
1 parent 90b8103 commit 615b98a

5 files changed: +154 −24 lines changed

.changelog/13391.txt

Lines changed: 3 additions & 0 deletions

@@ -0,0 +1,3 @@
+```release-note:enhancement
+bigtable: added `cluster.node_scaling_factor` field to `google_bigtable_instance` resource
+```

google/services/bigtable/resource_bigtable_instance.go

Lines changed: 38 additions & 12 deletions

@@ -154,6 +154,14 @@ func ResourceBigtableInstance() *schema.Resource {
 				Computed:    true,
 				Description: `The state of the cluster`,
 			},
+			"node_scaling_factor": {
+				Type:         schema.TypeString,
+				Optional:     true,
+				ForceNew:     true,
+				Default:      "NodeScalingFactor1X",
+				ValidateFunc: validation.StringInSlice([]string{"NodeScalingFactor1X", "NodeScalingFactor2X"}, false),
+				Description:  `The node scaling factor of this cluster. One of "NodeScalingFactor1X" or "NodeScalingFactor2X". Defaults to "NodeScalingFactor1X".`,
+			},
 		},
 	},
 },
@@ -523,13 +531,22 @@ func flattenBigtableCluster(c *bigtable.ClusterInfo) map[string]interface{} {
 		storageType = "HDD"
 	}
 
+	var nodeScalingFactor string
+	switch c.NodeScalingFactor {
+	case bigtable.NodeScalingFactor1X:
+		nodeScalingFactor = "NodeScalingFactor1X"
+	case bigtable.NodeScalingFactor2X:
+		nodeScalingFactor = "NodeScalingFactor2X"
+	}
+
 	cluster := map[string]interface{}{
-		"zone":         c.Zone,
-		"num_nodes":    c.ServeNodes,
-		"cluster_id":   c.Name,
-		"storage_type": storageType,
-		"kms_key_name": c.KMSKeyName,
-		"state":        c.State,
+		"zone":                c.Zone,
+		"num_nodes":           c.ServeNodes,
+		"cluster_id":          c.Name,
+		"storage_type":        storageType,
+		"kms_key_name":        c.KMSKeyName,
+		"state":               c.State,
+		"node_scaling_factor": nodeScalingFactor,
 	}
 	if c.AutoscalingConfig != nil {
 		cluster["autoscaling_config"] = make([]map[string]interface{}, 1)
@@ -612,12 +629,21 @@ func expandBigtableClusters(clusters []interface{}, instanceID string, config *t
 		storageType = bigtable.HDD
 	}
 
+	var nodeScalingFactor bigtable.NodeScalingFactor
+	switch cluster["node_scaling_factor"].(string) {
+	case "NodeScalingFactor1X":
+		nodeScalingFactor = bigtable.NodeScalingFactor1X
+	case "NodeScalingFactor2X":
+		nodeScalingFactor = bigtable.NodeScalingFactor2X
+	}
+
 	cluster_config := bigtable.ClusterConfig{
-		InstanceID:  instanceID,
-		Zone:        zone,
-		ClusterID:   cluster["cluster_id"].(string),
-		StorageType: storageType,
-		KMSKeyName:  cluster["kms_key_name"].(string),
+		InstanceID:        instanceID,
+		Zone:              zone,
+		ClusterID:         cluster["cluster_id"].(string),
+		StorageType:       storageType,
+		KMSKeyName:        cluster["kms_key_name"].(string),
+		NodeScalingFactor: nodeScalingFactor,
 	}
 	autoscaling_configs := cluster["autoscaling_config"].([]interface{})
 	if len(autoscaling_configs) > 0 {
@@ -756,7 +782,7 @@ func resourceBigtableInstanceClusterReorderTypeListFunc(diff tpgresource.Terrafo
 		return err
 	}
 
-	// Clusters can't have their zone, storage_type or kms_key_name updated,
+	// Clusters can't have their zone, storage_type, kms_key_name, or node_scaling_factor updated,
 	// ForceNew if it's changed. This will show a diff with the old state on
 	// the left side and the unmodified new state on the right and the ForceNew
 	// attributed to the _old state index_ even if the diff appears to have moved.
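As a quick usage sketch of the schema change above (resource and cluster names here are hypothetical; the shape mirrors the acceptance-test config later in this commit), a manually scaled cluster pinned to the 2X factor might look like:

```hcl
resource "google_bigtable_instance" "example" {
  name = "example-instance"

  cluster {
    cluster_id          = "example-cluster"
    zone                = "us-central1-b"
    num_nodes           = 2 # with NodeScalingFactor2X, node counts must be in increments of 2
    storage_type        = "SSD"
    node_scaling_factor = "NodeScalingFactor2X" # ForceNew: changing this recreates the cluster
  }

  deletion_protection = false
}
```

Omitting `node_scaling_factor` falls back to the schema default of `"NodeScalingFactor1X"`, which is what the first acceptance test below verifies.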

google/services/bigtable/resource_bigtable_instance_internal_test.go

Lines changed: 16 additions & 12 deletions

@@ -169,24 +169,28 @@ func TestUnitBigtable_flattenBigtableCluster(t *testing.T) {
 					"storage_target": 60,
 				},
 			},
+			// unspecified node scaling factor in input will lead to an empty string here
+			"node_scaling_factor": "",
 		},
 	},
 	"HDD manual scaling": {
 		clusterInfo: &bigtable.ClusterInfo{
-			StorageType: bigtable.HDD,
-			Zone:        "zone2",
-			ServeNodes:  7,
-			Name:        "hdd-cluster",
-			KMSKeyName:  "KMS",
-			State:       "READY",
+			StorageType:       bigtable.HDD,
+			Zone:              "zone2",
+			ServeNodes:        7,
+			Name:              "hdd-cluster",
+			KMSKeyName:        "KMS",
+			State:             "READY",
+			NodeScalingFactor: bigtable.NodeScalingFactor2X,
 		},
 		want: map[string]interface{}{
-			"zone":         "zone2",
-			"num_nodes":    7,
-			"cluster_id":   "hdd-cluster",
-			"storage_type": "HDD",
-			"kms_key_name": "KMS",
-			"state":        "READY",
+			"zone":                "zone2",
+			"num_nodes":           7,
+			"cluster_id":          "hdd-cluster",
+			"storage_type":        "HDD",
+			"kms_key_name":        "KMS",
+			"state":               "READY",
+			"node_scaling_factor": "NodeScalingFactor2X",
 		},
 	},
 }

google/services/bigtable/resource_bigtable_instance_test.go

Lines changed: 95 additions & 0 deletions

@@ -548,6 +548,101 @@ func TestAccBigtableInstance_forceDestroyBackups(t *testing.T) {
 	})
 }
 
+func TestAccBigtableInstance_createWithNodeScalingFactorDefault(t *testing.T) {
+	// bigtable instance does not use the shared HTTP client, this test creates an instance
+	acctest.SkipIfVcr(t)
+	t.Parallel()
+
+	instanceName := fmt.Sprintf("tf-test-nsf-%s", acctest.RandString(t, 10))
+
+	acctest.VcrTest(t, resource.TestCase{
+		PreCheck:                 func() { acctest.AccTestPreCheck(t) },
+		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t),
+		CheckDestroy:             testAccCheckBigtableInstanceDestroyProducer(t),
+		Steps: []resource.TestStep{
+			{
+				// Create config with nothing specified for node scaling factor.
+				// Ensure that we get 1X back.
+				Config: testAccBigtableInstance_nodeScalingFactor_allowDestroy(instanceName, 2, ""),
+				Check: resource.ComposeTestCheckFunc(
+					resource.TestCheckResourceAttr("google_bigtable_instance.instance", "cluster.0.num_nodes", "2"),
+					resource.TestCheckResourceAttr("google_bigtable_instance.instance", "cluster.0.node_scaling_factor", "NodeScalingFactor1X"),
+				),
+			},
+			{
+				ResourceName:            "google_bigtable_instance.instance",
+				ImportState:             true,
+				ImportStateVerify:       true,
+				ImportStateVerifyIgnore: []string{"deletion_protection", "instance_type"}, // we don't read instance type back
+			},
+		},
+	})
+}
+
+func TestAccBigtableInstance_createWithNodeScalingFactorThenUpdateViaForceNew(t *testing.T) {
+	// bigtable instance does not use the shared HTTP client, this test creates an instance
+	acctest.SkipIfVcr(t)
+	t.Parallel()
+
+	instanceName := fmt.Sprintf("tf-test-nsf-%s", acctest.RandString(t, 10))
+
+	acctest.VcrTest(t, resource.TestCase{
+		PreCheck:                 func() { acctest.AccTestPreCheck(t) },
+		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t),
+		CheckDestroy:             testAccCheckBigtableInstanceDestroyProducer(t),
+		Steps: []resource.TestStep{
+			{
+				// Create config with node scaling factor as 2X.
+				Config: testAccBigtableInstance_nodeScalingFactor_allowDestroy(instanceName, 2, "NodeScalingFactor2X"),
+				Check: resource.ComposeTestCheckFunc(
+					resource.TestCheckResourceAttr("google_bigtable_instance.instance", "cluster.0.num_nodes", "2"),
+					resource.TestCheckResourceAttr("google_bigtable_instance.instance", "cluster.0.node_scaling_factor", "NodeScalingFactor2X"),
+				),
+			},
+			{
+				ResourceName:            "google_bigtable_instance.instance",
+				ImportState:             true,
+				ImportStateVerify:       true,
+				ImportStateVerifyIgnore: []string{"deletion_protection", "instance_type"}, // we don't read instance type back
+			},
+			{
+				// Updating the node scaling factor is only possible without deletion protection, as we need ForceNew
+				Config: testAccBigtableInstance_nodeScalingFactor_allowDestroy(instanceName, 2, "NodeScalingFactor1X"),
+				Check: resource.ComposeTestCheckFunc(
+					resource.TestCheckResourceAttr("google_bigtable_instance.instance", "cluster.0.num_nodes", "2"),
+					resource.TestCheckResourceAttr("google_bigtable_instance.instance", "cluster.0.node_scaling_factor", "NodeScalingFactor1X"),
+				),
+			},
+			{
+				ResourceName:            "google_bigtable_instance.instance",
+				ImportState:             true,
+				ImportStateVerify:       true,
+				ImportStateVerifyIgnore: []string{"deletion_protection", "instance_type"}, // we don't read instance type back
+			},
+		},
+	})
+}
+
+func testAccBigtableInstance_nodeScalingFactor_allowDestroy(instanceName string, numNodes int, nodeScalingFactor string) string {
+	nodeScalingFactorAttribute := ""
+	if nodeScalingFactor != "" {
+		nodeScalingFactorAttribute = fmt.Sprintf("node_scaling_factor = \"%s\"", nodeScalingFactor)
+	}
+	return fmt.Sprintf(`
+resource "google_bigtable_instance" "instance" {
+  name = "%s"
+  cluster {
+    cluster_id   = "%s"
+    zone         = "us-central1-b"
+    num_nodes    = %d
+    storage_type = "SSD"
+    %s
+  }
+  deletion_protection = false
+}
+`, instanceName, instanceName, numNodes, nodeScalingFactorAttribute)
+}
+
 func testAccBigtableInstance_multipleClustersSameID(instanceName string) string {
 	return fmt.Sprintf(`
 resource "google_bigtable_instance" "instance" {

website/docs/r/bigtable_instance.html.markdown

Lines changed: 2 additions & 0 deletions

@@ -141,6 +141,8 @@ If no value is set, Cloud Bigtable automatically allocates nodes based on your d
 
 * `kms_key_name` - (Optional) Describes the Cloud KMS encryption key that will be used to protect the destination Bigtable cluster. The requirements for this key are: 1) The Cloud Bigtable service account associated with the project that contains this cluster must be granted the `cloudkms.cryptoKeyEncrypterDecrypter` role on the CMEK key. 2) Only regional keys can be used and the region of the CMEK key must match the region of the cluster.
 
+* `node_scaling_factor` - (Optional) The node scaling factor for this cluster. One of `"NodeScalingFactor1X"` or `"NodeScalingFactor2X"`. Defaults to `"NodeScalingFactor1X"`. If `"NodeScalingFactor2X"` is specified, then `num_nodes`, `min_nodes`, and `max_nodes` must be specified in increments of 2. This value cannot be updated after the cluster is created.
+
 -> **Note**: Removing the field entirely from the config will cause the provider to default to the backend value.
 
 !> **Warning:** Modifying the `storage_type`, `zone` or `kms_key_name` of an existing cluster (by
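As a hedged illustration of the increments-of-2 constraint documented above (names are hypothetical; the `autoscaling_config` block follows the existing resource schema), an autoscaled cluster on the 2X factor would keep `min_nodes` and `max_nodes` even:

```hcl
resource "google_bigtable_instance" "autoscaled" {
  name = "autoscaled-instance"

  cluster {
    cluster_id          = "autoscaled-cluster"
    zone                = "us-central1-b"
    storage_type        = "SSD"
    node_scaling_factor = "NodeScalingFactor2X"

    autoscaling_config {
      min_nodes  = 2  # must be an increment of 2 under NodeScalingFactor2X
      max_nodes  = 10
      cpu_target = 60
    }
  }

  deletion_protection = false
}
```

Because the field is ForceNew, switching the factor later replaces the cluster, which is why the acceptance tests in this commit disable `deletion_protection`.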
