Skip to content

Commit 8ea8e34

Browse files
Bigtable: fixes updating clusters with autoscaling (#13088) (#9368)
[upstream:121fccf6fe1308cb98e0c0e80c2241b8935f889a] Signed-off-by: Modular Magician <[email protected]>
1 parent dd0483f commit 8ea8e34

File tree

3 files changed

+105
-1
lines changed

3 files changed

+105
-1
lines changed

.changelog/13088.txt

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,3 @@
1+
```release-note:bug
2+
bigtable: fixed a bug where sometimes updating an instance's cluster list could result in an error if there was an existing cluster with autoscaling enabled
3+
```

google-beta/services/bigtable/resource_bigtable_instance.go

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -616,7 +616,6 @@ func expandBigtableClusters(clusters []interface{}, instanceID string, config *t
616616
InstanceID: instanceID,
617617
Zone: zone,
618618
ClusterID: cluster["cluster_id"].(string),
619-
NumNodes: int32(cluster["num_nodes"].(int)),
620619
StorageType: storageType,
621620
KMSKeyName: cluster["kms_key_name"].(string),
622621
}
@@ -629,6 +628,10 @@ func expandBigtableClusters(clusters []interface{}, instanceID string, config *t
629628
CPUTargetPercent: autoscaling_config["cpu_target"].(int),
630629
StorageUtilizationPerNode: autoscaling_config["storage_target"].(int),
631630
}
631+
} else {
632+
// We only set num_nodes if there is no auto-scaling config, since if
633+
// auto-scaling is enabled the number of live nodes is dynamic
634+
cluster_config.NumNodes = int32(cluster["num_nodes"].(int))
632635
}
633636
results = append(results, cluster_config)
634637
}

google-beta/services/bigtable/resource_bigtable_instance_test.go

Lines changed: 98 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -251,6 +251,58 @@ func TestAccBigtableInstance_createWithAutoscalingAndUpdate(t *testing.T) {
251251
})
252252
}
253253

254+
func TestAccBigtableInstance_createWithAutoscalingAndCreateAnotherOne(t *testing.T) {
255+
// bigtable instance does not use the shared HTTP client, this test creates an instance
256+
acctest.SkipIfVcr(t)
257+
t.Parallel()
258+
259+
instanceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10))
260+
261+
acctest.VcrTest(t, resource.TestCase{
262+
PreCheck: func() { acctest.AccTestPreCheck(t) },
263+
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t),
264+
CheckDestroy: testAccCheckBigtableInstanceDestroyProducer(t),
265+
Steps: []resource.TestStep{
266+
{
267+
// Create Autoscaling config with 1 node. Default storage_target is set by service based on storage type.
268+
Config: testAccBigtableInstance_autoscalingClusterWithZone(instanceName, "us-central1-a", 1, 3, 70),
269+
Check: resource.ComposeTestCheckFunc(
270+
resource.TestCheckResourceAttr("google_bigtable_instance.instance", "cluster.0.cluster_id", fmt.Sprintf("%s-c1", instanceName)),
271+
resource.TestCheckResourceAttr("google_bigtable_instance.instance", "cluster.0.autoscaling_config.0.min_nodes", "1"),
272+
resource.TestCheckResourceAttr("google_bigtable_instance.instance", "cluster.0.autoscaling_config.0.max_nodes", "3"),
273+
resource.TestCheckResourceAttr("google_bigtable_instance.instance", "cluster.0.autoscaling_config.0.cpu_target", "70"),
274+
),
275+
},
276+
{
277+
ResourceName: "google_bigtable_instance.instance",
278+
ImportState: true,
279+
ImportStateVerify: true,
280+
ImportStateVerifyIgnore: []string{"deletion_protection", "instance_type"}, // we don't read instance type back
281+
},
282+
{
283+
// Create another cluster
284+
Config: testAccBigtableInstance_2autoscalingClustersWithZone(instanceName, "us-central1-a", "us-central1-b", 1, 3, 70),
285+
Check: resource.ComposeTestCheckFunc(
286+
resource.TestCheckResourceAttr("google_bigtable_instance.instance", "cluster.0.cluster_id", fmt.Sprintf("%s-c1", instanceName)),
287+
resource.TestCheckResourceAttr("google_bigtable_instance.instance", "cluster.0.autoscaling_config.0.min_nodes", "1"),
288+
resource.TestCheckResourceAttr("google_bigtable_instance.instance", "cluster.0.autoscaling_config.0.max_nodes", "3"),
289+
resource.TestCheckResourceAttr("google_bigtable_instance.instance", "cluster.0.autoscaling_config.0.cpu_target", "70"),
290+
resource.TestCheckResourceAttr("google_bigtable_instance.instance", "cluster.1.cluster_id", fmt.Sprintf("%s-c2", instanceName)),
291+
resource.TestCheckResourceAttr("google_bigtable_instance.instance", "cluster.1.autoscaling_config.0.min_nodes", "1"),
292+
resource.TestCheckResourceAttr("google_bigtable_instance.instance", "cluster.1.autoscaling_config.0.max_nodes", "3"),
293+
resource.TestCheckResourceAttr("google_bigtable_instance.instance", "cluster.1.autoscaling_config.0.cpu_target", "70"),
294+
),
295+
},
296+
{
297+
ResourceName: "google_bigtable_instance.instance",
298+
ImportState: true,
299+
ImportStateVerify: true,
300+
ImportStateVerifyIgnore: []string{"deletion_protection", "instance_type"}, // we don't read instance type back
301+
},
302+
},
303+
})
304+
}
305+
254306
func TestAccBigtableInstance_createWithAutoscalingAndUpdateWithStorageTarget(t *testing.T) {
255307
// bigtable instance does not use the shared HTTP client, this test creates an instance
256308
acctest.SkipIfVcr(t)
@@ -783,6 +835,52 @@ func testAccBigtableInstance_autoscalingCluster(instanceName string, min int, ma
783835
}`, instanceName, instanceName, min, max, cpuTarget)
784836
}
785837

838+
func testAccBigtableInstance_autoscalingClusterWithZone(instanceName string, zone string, min int, max int, cpuTarget int) string {
839+
return fmt.Sprintf(`resource "google_bigtable_instance" "instance" {
840+
name = "%s"
841+
cluster {
842+
cluster_id = "%s-c1"
843+
storage_type = "HDD"
844+
zone = "%s"
845+
autoscaling_config {
846+
min_nodes = %d
847+
max_nodes = %d
848+
cpu_target = %d
849+
}
850+
}
851+
deletion_protection = false
852+
853+
}`, instanceName, instanceName, zone, min, max, cpuTarget)
854+
}
855+
856+
func testAccBigtableInstance_2autoscalingClustersWithZone(instanceName string, zoneCluster1 string, zoneCluster2 string, min int, max int, cpuTarget int) string {
857+
return fmt.Sprintf(`resource "google_bigtable_instance" "instance" {
858+
name = "%s"
859+
cluster {
860+
cluster_id = "%s-c1"
861+
storage_type = "HDD"
862+
zone = "%s"
863+
autoscaling_config {
864+
min_nodes = %d
865+
max_nodes = %d
866+
cpu_target = %d
867+
}
868+
}
869+
cluster {
870+
cluster_id = "%s-c2"
871+
storage_type = "HDD"
872+
zone = "%s"
873+
autoscaling_config {
874+
min_nodes = %d
875+
max_nodes = %d
876+
cpu_target = %d
877+
}
878+
}
879+
deletion_protection = false
880+
881+
}`, instanceName, instanceName, zoneCluster1, min, max, cpuTarget, instanceName, zoneCluster2, min, max, cpuTarget)
882+
}
883+
786884
func autoscalingClusterConfigWithStorageTarget(instanceName string, min int, max int, cpuTarget int, storageTarget int) string {
787885
return fmt.Sprintf(`resource "google_bigtable_instance" "instance" {
788886
name = "%s"

0 commit comments

Comments (0)