Skip to content

Commit 1dc730c

Browse files
Support node drain config (#16087) (#25791)
[upstream: 534cd1f08de4cab912a460a6645f1dcacf80f23d]
Signed-off-by: Modular Magician <magic-modules@google.com>
1 parent f4ca726 commit 1dc730c

File tree

7 files changed

+199
-1
lines changed

7 files changed

+199
-1
lines changed

.changelog/16087.txt

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
unknown: Support node drain config

google/services/container/resource_container_cluster_meta.yaml

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -657,6 +657,8 @@ fields:
657657
api_field: 'nodePools.config.windowsNodeConfig.osVersion'
658658
- field: 'node_pool.node_config.workload_metadata_config.mode'
659659
api_field: 'nodePools.config.workloadMetadataConfig.mode'
660+
- field: 'node_pool.node_drain_config.respect_pdb_during_node_pool_deletion'
661+
api_field: 'nodePools.nodeDrainConfig.respectPdbDuringNodePoolDeletion'
660662
- field: 'node_pool.node_count'
661663
provider_only: true
662664
- field: 'node_pool.node_locations'

google/services/container/resource_container_cluster_test.go

Lines changed: 55 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3241,6 +3241,36 @@ func TestAccContainerCluster_withNodePoolNodeConfig(t *testing.T) {
32413241
})
32423242
}
32433243

3244+
func TestAccContainerCluster_withNodePoolNodeDrainConfig(t *testing.T) {
3245+
t.Parallel()
3246+
3247+
cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10))
3248+
np := fmt.Sprintf("tf-test-np-%s", acctest.RandString(t, 10))
3249+
networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster")
3250+
subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName)
3251+
3252+
acctest.VcrTest(t, resource.TestCase{
3253+
PreCheck: func() { acctest.AccTestPreCheck(t) },
3254+
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t),
3255+
CheckDestroy: testAccCheckContainerClusterDestroyProducer(t),
3256+
Steps: []resource.TestStep{
3257+
{
3258+
Config: testAccContainerCluster_withNodePoolNodeDrainConfig(cluster, np, networkName, subnetworkName),
3259+
Check: resource.ComposeTestCheckFunc(
3260+
resource.TestCheckResourceAttr("google_container_cluster.with_node_pool_node_drain_config",
3261+
"node_pool.0.node_drain_config.0.respect_pdb_during_node_pool_deletion", "true"),
3262+
),
3263+
},
3264+
{
3265+
ResourceName: "google_container_cluster.with_node_pool_node_drain_config",
3266+
ImportState: true,
3267+
ImportStateVerify: true,
3268+
ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"},
3269+
},
3270+
},
3271+
})
3272+
}
3273+
32443274
func TestAccContainerCluster_withMaintenanceWindow(t *testing.T) {
32453275
t.Parallel()
32463276

@@ -9993,6 +10023,31 @@ resource "google_container_cluster" "with_node_pool_node_config" {
999310023
`, cluster, np, networkName, subnetworkName)
999410024
}
999510025

10026+
func testAccContainerCluster_withNodePoolNodeDrainConfig(cluster, np, networkName, subnetworkName string) string {
10027+
return fmt.Sprintf(`
10028+
data "google_container_engine_versions" "central1a" {
10029+
location = "us-central1-a"
10030+
}
10031+
10032+
resource "google_container_cluster" "with_node_pool_node_drain_config" {
10033+
name = "%s"
10034+
location = "us-central1-a"
10035+
min_master_version = data.google_container_engine_versions.central1a.latest_master_version
10036+
node_pool {
10037+
name = "%s"
10038+
initial_node_count = 1
10039+
node_drain_config {
10040+
respect_pdb_during_node_pool_deletion = true
10041+
}
10042+
}
10043+
10044+
network = "%s"
10045+
subnetwork = "%s"
10046+
deletion_protection = false
10047+
}
10048+
`, cluster, np, networkName, subnetworkName)
10049+
}
10050+
999610051
func testAccContainerCluster_withMaintenanceWindow(clusterName, startTime, networkName, subnetworkName string) string {
999710052
maintenancePolicy := ""
999810053
if len(startTime) > 0 {

google/services/container/resource_container_node_pool.go

Lines changed: 77 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -645,6 +645,22 @@ var schemaNodePool = map[string]*schema.Schema{
645645
},
646646
},
647647
},
648+
649+
"node_drain_config": {
650+
Type: schema.TypeList,
651+
Optional: true,
652+
Computed: true,
653+
Description: `Node drain configuration for this NodePool.`,
654+
Elem: &schema.Resource{
655+
Schema: map[string]*schema.Schema{
656+
"respect_pdb_during_node_pool_deletion": {
657+
Type: schema.TypeBool,
658+
Optional: true,
659+
Description: `Whether to respect PodDisruptionBudget policy during node pool deletion.`,
660+
},
661+
},
662+
},
663+
},
648664
}
649665

650666
type NodePoolInformation struct {
@@ -1238,6 +1254,15 @@ func expandNodePool(d *schema.ResourceData, prefix string) (*container.NodePool,
12381254
}
12391255
}
12401256

1257+
if v, ok := d.GetOk(prefix + "node_drain_config"); ok {
1258+
nodeDrainConfig := v.([]interface{})[0].(map[string]interface{})
1259+
np.NodeDrainConfig = &container.NodeDrainConfig{}
1260+
1261+
if v, ok := nodeDrainConfig["respect_pdb_during_node_pool_deletion"]; ok {
1262+
np.NodeDrainConfig.RespectPdbDuringNodePoolDeletion = v.(bool)
1263+
}
1264+
}
1265+
12411266
return np, nil
12421267
}
12431268

@@ -1282,6 +1307,17 @@ func flattenNodePoolUpgradeSettings(us *container.UpgradeSettings) []map[string]
12821307
return []map[string]interface{}{upgradeSettings}
12831308
}
12841309

1310+
func flattenNodePoolNodeDrainConfig(ndc *container.NodeDrainConfig) []map[string]interface{} {
1311+
if ndc == nil {
1312+
return nil
1313+
}
1314+
1315+
nodeDrainConfig := make(map[string]interface{})
1316+
1317+
nodeDrainConfig["respect_pdb_during_node_pool_deletion"] = ndc.RespectPdbDuringNodePoolDeletion
1318+
return []map[string]interface{}{nodeDrainConfig}
1319+
}
1320+
12851321
func flattenNodePool(d *schema.ResourceData, config *transport_tpg.Config, np *container.NodePool, prefix string) (map[string]interface{}, error) {
12861322
userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent)
12871323
if err != nil {
@@ -1386,6 +1422,10 @@ func flattenNodePool(d *schema.ResourceData, config *transport_tpg.Config, np *c
13861422
delete(nodePool, "upgrade_settings")
13871423
}
13881424

1425+
if np.NodeDrainConfig != nil {
1426+
nodePool["node_drain_config"] = flattenNodePoolNodeDrainConfig(np.NodeDrainConfig)
1427+
}
1428+
13891429
return nodePool, nil
13901430
}
13911431

@@ -1820,6 +1860,43 @@ func nodePoolUpdate(d *schema.ResourceData, meta interface{}, nodePoolInfo *Node
18201860
}
18211861
}
18221862

1863+
if d.HasChange(prefix + "node_drain_config") {
1864+
nodeDrainConfig := &container.NodeDrainConfig{}
1865+
if v, ok := d.GetOk(prefix + "node_drain_config"); ok {
1866+
nodeDrain := v.([]interface{})[0].(map[string]interface{})
1867+
if v, ok := nodeDrain["respect_pdb_during_node_pool_deletion"]; ok {
1868+
nodeDrainConfig.RespectPdbDuringNodePoolDeletion = v.(bool)
1869+
}
1870+
}
1871+
req := &container.UpdateNodePoolRequest{
1872+
NodeDrainConfig: nodeDrainConfig,
1873+
}
1874+
1875+
updateF := func() error {
1876+
clusterNodePoolsUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Update(nodePoolInfo.fullyQualifiedName(name), req)
1877+
1878+
if config.UserProjectOverride {
1879+
clusterNodePoolsUpdateCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project)
1880+
}
1881+
op, err := clusterNodePoolsUpdateCall.Do()
1882+
1883+
if err != nil {
1884+
return err
1885+
}
1886+
1887+
// Wait until it's updated
1888+
return ContainerOperationWait(config, op,
1889+
nodePoolInfo.project,
1890+
nodePoolInfo.location,
1891+
"updating GKE node pool node_drain_config", userAgent, timeout)
1892+
}
1893+
1894+
if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil {
1895+
return err
1896+
}
1897+
log.Printf("[INFO] Updated node_drain_config in Node Pool %s", name)
1898+
}
1899+
18231900
return nil
18241901
}
18251902

google/services/container/resource_container_node_pool_meta.yaml

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -278,6 +278,8 @@ fields:
278278
api_field: 'config.windowsNodeConfig.osVersion'
279279
- field: 'node_config.workload_metadata_config.mode'
280280
api_field: 'config.workloadMetadataConfig.mode'
281+
- field: 'node_drain_config.respect_pdb_during_node_pool_deletion'
282+
api_field: 'nodeDrainConfig.respectPdbDuringNodePoolDeletion'
281283
- field: 'node_count'
282284
provider_only: true
283285
- field: 'node_locations'

google/services/container/resource_container_node_pool_test.go

Lines changed: 56 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1520,6 +1520,34 @@ func TestAccContainerNodePool_withManagement(t *testing.T) {
15201520
})
15211521
}
15221522

1523+
func TestAccContainerNodePool_withNodeDrainConfig(t *testing.T) {
1524+
t.Parallel()
1525+
1526+
cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10))
1527+
nodePool := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10))
1528+
networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster")
1529+
subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName)
1530+
1531+
acctest.VcrTest(t, resource.TestCase{
1532+
PreCheck: func() { acctest.AccTestPreCheck(t) },
1533+
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t),
1534+
CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t),
1535+
Steps: []resource.TestStep{
1536+
{
1537+
Config: testAccContainerNodePool_withNodeDrainConfig(cluster, nodePool, networkName, subnetworkName),
1538+
Check: resource.ComposeTestCheckFunc(
1539+
resource.TestCheckResourceAttr("google_container_node_pool.np_with_node_drain_config", "node_drain_config.0.respect_pdb_during_node_pool_deletion", "true"),
1540+
),
1541+
},
1542+
{
1543+
ResourceName: "google_container_node_pool.np_with_node_drain_config",
1544+
ImportState: true,
1545+
ImportStateVerify: true,
1546+
},
1547+
},
1548+
})
1549+
}
1550+
15231551
func TestAccContainerNodePool_withNodeConfigScopeAlias(t *testing.T) {
15241552
t.Parallel()
15251553

@@ -4531,6 +4559,34 @@ resource "google_container_node_pool" "np_with_node_config_scope_alias" {
45314559
`, cluster, networkName, subnetworkName, np)
45324560
}
45334561

4562+
func testAccContainerNodePool_withNodeDrainConfig(cluster, np, networkName, subnetworkName string) string {
4563+
return fmt.Sprintf(`
4564+
data "google_container_engine_versions" "central1a" {
4565+
location = "us-central1-a"
4566+
}
4567+
4568+
resource "google_container_cluster" "cluster" {
4569+
name = "%s"
4570+
location = "us-central1-a"
4571+
initial_node_count = 1
4572+
min_master_version = data.google_container_engine_versions.central1a.latest_master_version
4573+
deletion_protection = false
4574+
network = "%s"
4575+
subnetwork = "%s"
4576+
}
4577+
4578+
resource "google_container_node_pool" "np_with_node_drain_config" {
4579+
name = "%s"
4580+
location = "us-central1-a"
4581+
cluster = google_container_cluster.cluster.name
4582+
initial_node_count = 1
4583+
node_drain_config {
4584+
respect_pdb_during_node_pool_deletion = true
4585+
}
4586+
}
4587+
`, cluster, networkName, subnetworkName, np)
4588+
}
4589+
45344590
func testAccContainerNodePool_version(cluster, np, networkName, subnetworkName string) string {
45354591
return fmt.Sprintf(`
45364592
data "google_container_engine_versions" "central1a" {

website/docs/r/container_node_pool.html.markdown

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -168,6 +168,8 @@ cluster.
168168
* `node_count` - (Optional) The number of nodes per instance group. This field can be used to
169169
update the number of nodes per instance group but should not be used alongside `autoscaling`.
170170

171+
* `node_drain_config` - (Optional) The node drain configuration of the pool. Structure is [documented below](#nested_node_drain_config).
172+
171173
* `project` - (Optional) The ID of the project in which to create the node pool. If blank,
172174
the provider-configured project will be used.
173175

@@ -254,12 +256,15 @@ cluster.
254256
<a name="network_performance_config"></a>The `network_performance_config` block supports:
255257

256258
* `total_egress_bandwidth_tier` (Required) - Specifies the total network bandwidth tier for the NodePool. [Valid values](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1/projects.locations.clusters.nodePools#NodePool.Tier) include: "TIER_1" and "TIER_UNSPECIFIED".
257-
* ```
258259

259260
<a name="pod_cidr_overprovision_config"></a>The `pod_cidr_overprovision_config` block supports:
260261

261262
* `disabled` (Required) - Whether pod cidr overprovision is disabled.
262263

264+
<a name="nested_node_drain_config"></a>The `node_drain_config` block supports:
265+
266+
* `respect_pdb_during_node_pool_deletion` - (Optional) Whether to respect PodDisruptionBudget policy during node pool deletion.
267+
263268
<a name="nested_upgrade_settings"></a>The `upgrade_settings` block supports:
264269

265270
* `max_surge` - (Optional) The number of additional nodes that can be added to the node pool during

0 commit comments

Comments (0)