@@ -561,7 +561,7 @@ func TestAccBigtableInstance_createWithNodeScalingFactorDefault(t *testing.T) {
561561 {
562562 // Create config with nothing specified for node scaling factor.
563563 // Ensure that we get 1X back.
564- Config : testAccBigtableInstance_nodeScalingFactor_allowDestroy (instanceName , 2 , "" ),
564+ Config : testAccBigtableInstance_nodeScalingFactor (instanceName , 2 , "" , false ),
565565 Check : resource .ComposeTestCheckFunc (
566566 resource .TestCheckResourceAttr ("google_bigtable_instance.instance" , "cluster.0.num_nodes" , "2" ),
567567 resource .TestCheckResourceAttr ("google_bigtable_instance.instance" , "cluster.0.node_scaling_factor" , "NodeScalingFactor1X" ),
@@ -591,7 +591,7 @@ func TestAccBigtableInstance_createWithNodeScalingFactorThenUpdateViaForceNew(t
591591 Steps : []resource.TestStep {
592592 {
593593 // Create config with node scaling factor as 2x.
594- Config : testAccBigtableInstance_nodeScalingFactor_allowDestroy (instanceName , 2 , "NodeScalingFactor2X" ),
594+ Config : testAccBigtableInstance_nodeScalingFactor (instanceName , 2 , "NodeScalingFactor2X" , false ),
595595 Check : resource .ComposeTestCheckFunc (
596596 resource .TestCheckResourceAttr ("google_bigtable_instance.instance" , "cluster.0.num_nodes" , "2" ),
597597 resource .TestCheckResourceAttr ("google_bigtable_instance.instance" , "cluster.0.node_scaling_factor" , "NodeScalingFactor2X" ),
@@ -605,7 +605,7 @@ func TestAccBigtableInstance_createWithNodeScalingFactorThenUpdateViaForceNew(t
605605 },
606606 {
607607 // Updating the node scaling factor only possible without delete protection, as we need ForceNew
608- Config : testAccBigtableInstance_nodeScalingFactor_allowDestroy (instanceName , 2 , "NodeScalingFactor1X" ),
608+ Config : testAccBigtableInstance_nodeScalingFactor (instanceName , 2 , "NodeScalingFactor1X" , false ),
609609 Check : resource .ComposeTestCheckFunc (
610610 resource .TestCheckResourceAttr ("google_bigtable_instance.instance" , "cluster.0.num_nodes" , "2" ),
611611 resource .TestCheckResourceAttr ("google_bigtable_instance.instance" , "cluster.0.node_scaling_factor" , "NodeScalingFactor1X" ),
@@ -621,7 +621,108 @@ func TestAccBigtableInstance_createWithNodeScalingFactorThenUpdateViaForceNew(t
621621 })
622622}
623623
624- func testAccBigtableInstance_nodeScalingFactor_allowDestroy (instanceName string , numNodes int , nodeScalingFactor string ) string {
624+ func TestAccBigtableInstance_createWithNodeScalingFactorThenFailFromDeletionProtection (t * testing.T ) {
625+ // bigtable instance does not use the shared HTTP client, this test creates an instance
626+ acctest .SkipIfVcr (t )
627+ t .Parallel ()
628+
629+ instanceName := fmt .Sprintf ("tf-test-nsf-%s" , acctest .RandString (t , 10 ))
630+
631+ acctest .VcrTest (t , resource.TestCase {
632+ PreCheck : func () { acctest .AccTestPreCheck (t ) },
633+ ProtoV5ProviderFactories : acctest .ProtoV5ProviderFactories (t ),
634+ CheckDestroy : testAccCheckBigtableInstanceDestroyProducer (t ),
635+ Steps : []resource.TestStep {
636+ {
637+ // Create config with node scaling factor as 2x.
638+ Config : testAccBigtableInstance_nodeScalingFactor (instanceName , 2 , "NodeScalingFactor2X" , true ),
639+ Check : resource .ComposeTestCheckFunc (
640+ resource .TestCheckResourceAttr ("google_bigtable_instance.instance" , "cluster.0.node_scaling_factor" , "NodeScalingFactor2X" ),
641+ ),
642+ },
643+ {
644+ ResourceName : "google_bigtable_instance.instance" ,
645+ ImportState : true ,
646+ ImportStateVerify : true ,
647+ ImportStateVerifyIgnore : []string {"deletion_protection" , "instance_type" }, // we don't read instance type back
648+ },
649+ {
650+ // Updating the node scaling factor only possible without delete protection, should error
651+ Config : testAccBigtableInstance_nodeScalingFactor (instanceName , 2 , "NodeScalingFactor1X" , true ),
652+ ExpectError : regexp .MustCompile ("deletion_protection" ), // change in node_scaling_factor causes recreate which fails b/c of deletion_protection = true
653+ Check : resource .ComposeTestCheckFunc (
654+ resource .TestCheckResourceAttr ("google_bigtable_instance.instance" , "cluster.0.node_scaling_factor" , "NodeScalingFactor1X" ),
655+ ),
656+ },
657+ {
658+ ResourceName : "google_bigtable_instance.instance" ,
659+ ImportState : true ,
660+ ImportStateVerify : true ,
661+ ImportStateVerifyIgnore : []string {"deletion_protection" , "instance_type" }, // we don't read instance type back
662+ },
663+ {
664+ // set deletion protection false to allow cleanup
665+ Config : testAccBigtableInstance_nodeScalingFactor (instanceName , 2 , "NodeScalingFactor2X" , false ),
666+ Check : resource .ComposeTestCheckFunc (
667+ resource .TestCheckResourceAttr ("google_bigtable_instance.instance" , "deletion_protection" , "false" ),
668+ ),
669+ },
670+ },
671+ })
672+ }
673+
674+ func TestAccBigtableInstance_addNewClusterWithoutDeletionProtection (t * testing.T ) {
675+ // bigtable instance does not use the shared HTTP client, this test creates an instance
676+ acctest .SkipIfVcr (t )
677+ t .Parallel ()
678+
679+ instanceName := fmt .Sprintf ("tf-test-nsf-%s" , acctest .RandString (t , 10 ))
680+
681+ acctest .VcrTest (t , resource.TestCase {
682+ PreCheck : func () { acctest .AccTestPreCheck (t ) },
683+ ProtoV5ProviderFactories : acctest .ProtoV5ProviderFactories (t ),
684+ CheckDestroy : testAccCheckBigtableInstanceDestroyProducer (t ),
685+ Steps : []resource.TestStep {
686+ {
687+ // creates new instance, single cluster, deletion protection enabled
688+ Config : testAccBigtableInstance_nodeScalingFactor (instanceName , 2 , "NodeScalingFactor2X" , true ),
689+ Check : resource .ComposeTestCheckFunc (
690+ resource .TestCheckResourceAttr ("google_bigtable_instance.instance" , "cluster.0.num_nodes" , "2" ),
691+ resource .TestCheckResourceAttr ("google_bigtable_instance.instance" , "cluster.0.node_scaling_factor" , "NodeScalingFactor2X" ),
692+ ),
693+ },
694+ {
695+ ResourceName : "google_bigtable_instance.instance" ,
696+ ImportState : true ,
697+ ImportStateVerify : true ,
698+ ImportStateVerifyIgnore : []string {"deletion_protection" , "instance_type" }, // we don't read instance type back
699+ },
700+ {
701+ // adds new cluster to instance (does not require recreate)
702+ Config : testAccBigtableInstance_nodeScalingFactor_multipleClusters (instanceName , 2 , "NodeScalingFactor2X" , true ),
703+ Check : resource .ComposeTestCheckFunc (
704+ resource .TestCheckResourceAttr ("google_bigtable_instance.instance" , "cluster.1.num_nodes" , "2" ),
705+ resource .TestCheckResourceAttr ("google_bigtable_instance.instance" , "cluster.1.node_scaling_factor" , "NodeScalingFactor2X" ),
706+ ),
707+ },
708+ {
709+ ResourceName : "google_bigtable_instance.instance" ,
710+ ImportState : true ,
711+ ImportStateVerify : true ,
712+ ImportStateVerifyIgnore : []string {"deletion_protection" , "instance_type" }, // we don't read instance type back
713+ },
714+ {
715+ // set instance to have no deletion protection to allow cleanup
716+ Config : testAccBigtableInstance_nodeScalingFactor_multipleClusters (instanceName , 2 , "NodeScalingFactor2X" , false ),
717+ Check : resource .ComposeTestCheckFunc (
718+ resource .TestCheckResourceAttr ("google_bigtable_instance.instance" , "deletion_protection" , "false" ),
719+ ),
720+ },
721+ },
722+ })
723+ }
724+
725+ func testAccBigtableInstance_nodeScalingFactor (instanceName string , numNodes int , nodeScalingFactor string , deletionProtection bool ) string {
625726 nodeScalingFactorAttribute := ""
626727 if nodeScalingFactor != "" {
627728 nodeScalingFactorAttribute = fmt .Sprintf ("node_scaling_factor = \" %s\" " , nodeScalingFactor )
@@ -636,9 +737,42 @@ resource "google_bigtable_instance" "instance" {
636737 storage_type = "SSD"
637738 %s
638739 }
639- deletion_protection = false
740+ deletion_protection = %t
741+ }
742+ ` , instanceName , instanceName , numNodes , nodeScalingFactorAttribute , deletionProtection )
743+ }
744+
// testAccBigtableInstance_nodeScalingFactor_multipleClusters renders an instance
// config with two clusters (us-central1-b and us-east4-a) that share numNodes and
// nodeScalingFactor. An empty nodeScalingFactor omits the attribute entirely so
// the API default can be exercised. deletionProtection maps straight onto the
// deletion_protection argument.
func testAccBigtableInstance_nodeScalingFactor_multipleClusters(instanceName string, numNodes int, nodeScalingFactor string, deletionProtection bool) string {
	// Render the optional attribute only when a value was requested.
	nsfAttr := ""
	if nodeScalingFactor != "" {
		nsfAttr = fmt.Sprintf("node_scaling_factor = \"%s\"", nodeScalingFactor)
	}
	template := `
resource "google_bigtable_instance" "instance" {
  name = "%s"

  cluster {
    cluster_id   = "%s"
    zone         = "us-central1-b"
    num_nodes    = %d
    storage_type = "SSD"
    %s
  }

  cluster {
    cluster_id   = "%s-2"
    zone         = "us-east4-a"
    num_nodes    = %d
    storage_type = "SSD"
    %s
  }

  deletion_protection = %t
}
`
	return fmt.Sprintf(template,
		instanceName,
		instanceName, numNodes, nsfAttr, // first cluster
		instanceName, numNodes, nsfAttr, // second cluster
		deletionProtection)
}
643777
644778func testAccBigtableInstance_multipleClustersSameID (instanceName string ) string {
0 commit comments