Skip to content

Commit 0747e26

Browse files
BigTable - NodeScalingFactor Force New BugFix (#15112) (#10744)
[upstream:23a2acd1f62aa8c5d09e3c5a026e07f69b7f2927] Signed-off-by: Modular Magician <[email protected]>
1 parent 0003f63 commit 0747e26

File tree

3 files changed

+192
-7
lines changed

3 files changed

+192
-7
lines changed

.changelog/15112.txt

Lines changed: 44 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,44 @@
1+
```release-note:bug
2+
bigtable: fixed `node_scaling_factor` forcing new instance on `google_bigtable_instance` when adding new cluster
3+
```
4+
5+
### Context:
6+
7+
Related Github Issue: https://github.com/hashicorp/terraform-provider-google/issues/23564
8+
9+
TL;DR: When adding a new cluster to an existing Bigtable instance, a forced recreate is required due to the `forceNew` attribute on `node_scaling_factor`. This doesn't match the API / Console functionality, where adding a new cluster requires no recreate.
10+
11+
This PR removes the unconditional `forceNew` attribute on `node_scaling_factor` and instead adds a CustomizeDiff check for whether an existing cluster's `node_scaling_factor` has changed. If it has, require a recreate; if not, don't.
12+
13+
### Testing:
14+
15+
I've added two new test cases.
16+
1. Tests that modifying an existing cluster's `node_scaling_factor` fails when `deletion_protection` is `true`.
17+
2. Tests that adding a new cluster to an existing instance does not require deletion/recreation.
18+
19+
I've run these tests in a local environment; sample logs are below.
20+
21+
```
22+
brandon.cate@DD944JY0FF terraform-provider-google-beta % make testacc TEST=./google-beta/services/bigtable TESTARGS='-run=TestAccBigtableInstance_addNewClusterWithoutDeletionProtection$$'
23+
==> Checking that code complies with gofmt requirements...
24+
go vet
25+
TF_ACC=1 TF_SCHEMA_PANIC_ON_ERROR=1 go test ./google-beta/services/bigtable -v -run=TestAccBigtableInstance_addNewClusterWithoutDeletionProtection$ -timeout 240m -ldflags="-X=github.com/hashicorp/terraform-provider-google-beta/version.ProviderVersion=acc"
26+
=== RUN TestAccBigtableInstance_addNewClusterWithoutDeletionProtection
27+
=== PAUSE TestAccBigtableInstance_addNewClusterWithoutDeletionProtection
28+
=== CONT TestAccBigtableInstance_addNewClusterWithoutDeletionProtection
29+
--- PASS: TestAccBigtableInstance_addNewClusterWithoutDeletionProtection (45.62s)
30+
PASS
31+
ok github.com/hashicorp/terraform-provider-google-beta/google-beta/services/bigtable 46.799s
32+
33+
34+
brandon.cate@DD944JY0FF terraform-provider-google-beta % make testacc TEST=./google-beta/services/bigtable TESTARGS='-run=TestAccBigtableInstance_createWithNodeScalingFactorThenFailFromDeletionProtection$$'
35+
==> Checking that code complies with gofmt requirements...
36+
go vet
37+
TF_ACC=1 TF_SCHEMA_PANIC_ON_ERROR=1 go test ./google-beta/services/bigtable -v -run=TestAccBigtableInstance_createWithNodeScalingFactorThenFailFromDeletionProtection$ -timeout 240m -ldflags="-X=github.com/hashicorp/terraform-provider-google-beta/version.ProviderVersion=acc"
38+
=== RUN TestAccBigtableInstance_createWithNodeScalingFactorThenFailFromDeletionProtection
39+
=== PAUSE TestAccBigtableInstance_createWithNodeScalingFactorThenFailFromDeletionProtection
40+
=== CONT TestAccBigtableInstance_createWithNodeScalingFactorThenFailFromDeletionProtection
41+
--- PASS: TestAccBigtableInstance_createWithNodeScalingFactorThenFailFromDeletionProtection (27.52s)
42+
PASS
43+
ok github.com/hashicorp/terraform-provider-google-beta/google-beta/services/bigtable 28.493s
44+
```

google-beta/services/bigtable/resource_bigtable_instance.go

Lines changed: 8 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -171,7 +171,6 @@ func ResourceBigtableInstance() *schema.Resource {
171171
"node_scaling_factor": {
172172
Type: schema.TypeString,
173173
Optional: true,
174-
ForceNew: true,
175174
Default: "NodeScalingFactor1X",
176175
ValidateFunc: validation.StringInSlice([]string{"NodeScalingFactor1X", "NodeScalingFactor2X"}, false),
177176
Description: `The node scaling factor of this cluster. One of "NodeScalingFactor1X" or "NodeScalingFactor2X". Defaults to "NodeScalingFactor1X".`,
@@ -837,6 +836,14 @@ func resourceBigtableInstanceClusterReorderTypeListFunc(diff tpgresource.Terrafo
837836
return fmt.Errorf("Error setting cluster diff: %s", err)
838837
}
839838
}
839+
840+
oNSF, nNSF := diff.GetChange(fmt.Sprintf("cluster.%d.node_scaling_factor", i))
841+
if oNSF != nNSF {
842+
err := diff.ForceNew(fmt.Sprintf("cluster.%d.node_scaling_factor", i))
843+
if err != nil {
844+
return fmt.Errorf("Error setting cluster diff: %s", err)
845+
}
846+
}
840847
}
841848

842849
return nil

google-beta/services/bigtable/resource_bigtable_instance_test.go

Lines changed: 140 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -577,7 +577,7 @@ func TestAccBigtableInstance_createWithNodeScalingFactorDefault(t *testing.T) {
577577
{
578578
// Create config with nothing specified for node scaling factor.
579579
// Ensure that we get 1X back.
580-
Config: testAccBigtableInstance_nodeScalingFactor_allowDestroy(instanceName, 2, ""),
580+
Config: testAccBigtableInstance_nodeScalingFactor(instanceName, 2, "", false),
581581
Check: resource.ComposeTestCheckFunc(
582582
resource.TestCheckResourceAttr("google_bigtable_instance.instance", "cluster.0.num_nodes", "2"),
583583
resource.TestCheckResourceAttr("google_bigtable_instance.instance", "cluster.0.node_scaling_factor", "NodeScalingFactor1X"),
@@ -607,7 +607,7 @@ func TestAccBigtableInstance_createWithNodeScalingFactorThenUpdateViaForceNew(t
607607
Steps: []resource.TestStep{
608608
{
609609
// Create config with node scaling factor as 2x.
610-
Config: testAccBigtableInstance_nodeScalingFactor_allowDestroy(instanceName, 2, "NodeScalingFactor2X"),
610+
Config: testAccBigtableInstance_nodeScalingFactor(instanceName, 2, "NodeScalingFactor2X", false),
611611
Check: resource.ComposeTestCheckFunc(
612612
resource.TestCheckResourceAttr("google_bigtable_instance.instance", "cluster.0.num_nodes", "2"),
613613
resource.TestCheckResourceAttr("google_bigtable_instance.instance", "cluster.0.node_scaling_factor", "NodeScalingFactor2X"),
@@ -621,7 +621,7 @@ func TestAccBigtableInstance_createWithNodeScalingFactorThenUpdateViaForceNew(t
621621
},
622622
{
623623
// Updating the node scaling factor only possible without delete protection, as we need ForceNew
624-
Config: testAccBigtableInstance_nodeScalingFactor_allowDestroy(instanceName, 2, "NodeScalingFactor1X"),
624+
Config: testAccBigtableInstance_nodeScalingFactor(instanceName, 2, "NodeScalingFactor1X", false),
625625
Check: resource.ComposeTestCheckFunc(
626626
resource.TestCheckResourceAttr("google_bigtable_instance.instance", "cluster.0.num_nodes", "2"),
627627
resource.TestCheckResourceAttr("google_bigtable_instance.instance", "cluster.0.node_scaling_factor", "NodeScalingFactor1X"),
@@ -637,7 +637,108 @@ func TestAccBigtableInstance_createWithNodeScalingFactorThenUpdateViaForceNew(t
637637
})
638638
}
639639

640-
func testAccBigtableInstance_nodeScalingFactor_allowDestroy(instanceName string, numNodes int, nodeScalingFactor string) string {
640+
func TestAccBigtableInstance_createWithNodeScalingFactorThenFailFromDeletionProtection(t *testing.T) {
641+
// bigtable instance does not use the shared HTTP client, this test creates an instance
642+
acctest.SkipIfVcr(t)
643+
t.Parallel()
644+
645+
instanceName := fmt.Sprintf("tf-test-nsf-%s", acctest.RandString(t, 10))
646+
647+
acctest.VcrTest(t, resource.TestCase{
648+
PreCheck: func() { acctest.AccTestPreCheck(t) },
649+
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t),
650+
CheckDestroy: testAccCheckBigtableInstanceDestroyProducer(t),
651+
Steps: []resource.TestStep{
652+
{
653+
// Create config with node scaling factor as 2x.
654+
Config: testAccBigtableInstance_nodeScalingFactor(instanceName, 2, "NodeScalingFactor2X", true),
655+
Check: resource.ComposeTestCheckFunc(
656+
resource.TestCheckResourceAttr("google_bigtable_instance.instance", "cluster.0.node_scaling_factor", "NodeScalingFactor2X"),
657+
),
658+
},
659+
{
660+
ResourceName: "google_bigtable_instance.instance",
661+
ImportState: true,
662+
ImportStateVerify: true,
663+
ImportStateVerifyIgnore: []string{"deletion_protection", "instance_type"}, // we don't read instance type back
664+
},
665+
{
666+
// Updating the node scaling factor only possible without delete protection, should error
667+
Config: testAccBigtableInstance_nodeScalingFactor(instanceName, 2, "NodeScalingFactor1X", true),
668+
ExpectError: regexp.MustCompile("deletion_protection"), // change in node_scaling_factor causes recreate which fails b/c of deletion_protection = true
669+
Check: resource.ComposeTestCheckFunc(
670+
resource.TestCheckResourceAttr("google_bigtable_instance.instance", "cluster.0.node_scaling_factor", "NodeScalingFactor1X"),
671+
),
672+
},
673+
{
674+
ResourceName: "google_bigtable_instance.instance",
675+
ImportState: true,
676+
ImportStateVerify: true,
677+
ImportStateVerifyIgnore: []string{"deletion_protection", "instance_type"}, // we don't read instance type back
678+
},
679+
{
680+
// set deletion protection false to allow cleanup
681+
Config: testAccBigtableInstance_nodeScalingFactor(instanceName, 2, "NodeScalingFactor2X", false),
682+
Check: resource.ComposeTestCheckFunc(
683+
resource.TestCheckResourceAttr("google_bigtable_instance.instance", "deletion_protection", "false"),
684+
),
685+
},
686+
},
687+
})
688+
}
689+
690+
func TestAccBigtableInstance_addNewClusterWithoutDeletionProtection(t *testing.T) {
691+
// bigtable instance does not use the shared HTTP client, this test creates an instance
692+
acctest.SkipIfVcr(t)
693+
t.Parallel()
694+
695+
instanceName := fmt.Sprintf("tf-test-nsf-%s", acctest.RandString(t, 10))
696+
697+
acctest.VcrTest(t, resource.TestCase{
698+
PreCheck: func() { acctest.AccTestPreCheck(t) },
699+
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t),
700+
CheckDestroy: testAccCheckBigtableInstanceDestroyProducer(t),
701+
Steps: []resource.TestStep{
702+
{
703+
// creates new instance, single cluster, deletion protection enabled
704+
Config: testAccBigtableInstance_nodeScalingFactor(instanceName, 2, "NodeScalingFactor2X", true),
705+
Check: resource.ComposeTestCheckFunc(
706+
resource.TestCheckResourceAttr("google_bigtable_instance.instance", "cluster.0.num_nodes", "2"),
707+
resource.TestCheckResourceAttr("google_bigtable_instance.instance", "cluster.0.node_scaling_factor", "NodeScalingFactor2X"),
708+
),
709+
},
710+
{
711+
ResourceName: "google_bigtable_instance.instance",
712+
ImportState: true,
713+
ImportStateVerify: true,
714+
ImportStateVerifyIgnore: []string{"deletion_protection", "instance_type"}, // we don't read instance type back
715+
},
716+
{
717+
// adds new cluster to instance (does not require recreate)
718+
Config: testAccBigtableInstance_nodeScalingFactor_multipleClusters(instanceName, 2, "NodeScalingFactor2X", true),
719+
Check: resource.ComposeTestCheckFunc(
720+
resource.TestCheckResourceAttr("google_bigtable_instance.instance", "cluster.1.num_nodes", "2"),
721+
resource.TestCheckResourceAttr("google_bigtable_instance.instance", "cluster.1.node_scaling_factor", "NodeScalingFactor2X"),
722+
),
723+
},
724+
{
725+
ResourceName: "google_bigtable_instance.instance",
726+
ImportState: true,
727+
ImportStateVerify: true,
728+
ImportStateVerifyIgnore: []string{"deletion_protection", "instance_type"}, // we don't read instance type back
729+
},
730+
{
731+
// set instance to have no deletion protection to allow cleanup
732+
Config: testAccBigtableInstance_nodeScalingFactor_multipleClusters(instanceName, 2, "NodeScalingFactor2X", false),
733+
Check: resource.ComposeTestCheckFunc(
734+
resource.TestCheckResourceAttr("google_bigtable_instance.instance", "deletion_protection", "false"),
735+
),
736+
},
737+
},
738+
})
739+
}
740+
741+
func testAccBigtableInstance_nodeScalingFactor(instanceName string, numNodes int, nodeScalingFactor string, deletionProtection bool) string {
641742
nodeScalingFactorAttribute := ""
642743
if nodeScalingFactor != "" {
643744
nodeScalingFactorAttribute = fmt.Sprintf("node_scaling_factor = \"%s\"", nodeScalingFactor)
@@ -652,9 +753,42 @@ resource "google_bigtable_instance" "instance" {
652753
storage_type = "SSD"
653754
%s
654755
}
655-
deletion_protection = false
756+
deletion_protection = %t
757+
}
758+
`, instanceName, instanceName, numNodes, nodeScalingFactorAttribute, deletionProtection)
759+
}
760+
761+
func testAccBigtableInstance_nodeScalingFactor_multipleClusters(instanceName string, numNodes int, nodeScalingFactor string, deletionProtection bool) string {
762+
nodeScalingFactorAttribute := ""
763+
if nodeScalingFactor != "" {
764+
nodeScalingFactorAttribute = fmt.Sprintf("node_scaling_factor = \"%s\"", nodeScalingFactor)
765+
}
766+
return fmt.Sprintf(`
767+
resource "google_bigtable_instance" "instance" {
768+
name = "%s"
769+
770+
cluster {
771+
cluster_id = "%s"
772+
zone = "us-central1-b"
773+
num_nodes = %d
774+
storage_type = "SSD"
775+
%s
776+
}
777+
778+
cluster {
779+
cluster_id = "%s-2"
780+
zone = "us-east4-a"
781+
num_nodes = %d
782+
storage_type = "SSD"
783+
%s
784+
}
785+
786+
deletion_protection = %t
656787
}
657-
`, instanceName, instanceName, numNodes, nodeScalingFactorAttribute)
788+
`, instanceName,
789+
instanceName, numNodes, nodeScalingFactorAttribute, // first cluster
790+
instanceName, numNodes, nodeScalingFactorAttribute, // second cluster
791+
deletionProtection)
658792
}
659793

660794
func testAccBigtableInstance_multipleClustersSameID(instanceName string) string {

0 commit comments

Comments
 (0)