Skip to content

Commit f3417b1

Browse files
authored
fix: support for SHORT_LIVED strategy (#2352)
1 parent 22712d4 commit f3417b1

File tree

19 files changed

+241
-193
lines changed

19 files changed

+241
-193
lines changed

README.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -353,7 +353,7 @@ The node_pools variable takes the following parameters:
353353
| max_count | Maximum number of nodes in the NodePool. Must be >= min_count. Cannot be used with total limits. | 100 | Optional |
354354
| total_max_count | Total maximum number of nodes in the NodePool. Must be >= min_count. Cannot be used with per zone limits. | null | Optional |
355355
| max_pods_per_node | The maximum number of pods per node in this cluster | null | Optional |
356-
| strategy | The upgrade stragey to be used for upgrading the nodes. Valid values of state are: `SURGE` or `BLUE_GREEN` | "SURGE" | Optional |
356+
| strategy | The upgrade strategy to be used for upgrading the nodes. Valid values are: `SURGE`, `BLUE_GREEN`, or, for flex-start and queued provisioning, `SHORT_LIVED` | "SURGE" | Optional |
357357
| threads_per_core | Optional. The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed | null | Optional |
358358
| enable_nested_virtualization | Whether the node should have nested virtualization | null | Optional |
359359
| max_surge | The number of additional nodes that can be added to the node pool during an upgrade. Increasing max_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater. Only works with `SURGE` strategy. | 1 | Optional |

autogen/main/README.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -238,7 +238,7 @@ The node_pools variable takes the following parameters:
238238
| max_count | Maximum number of nodes in the NodePool. Must be >= min_count. Cannot be used with total limits. | 100 | Optional |
239239
| total_max_count | Total maximum number of nodes in the NodePool. Must be >= min_count. Cannot be used with per zone limits. | null | Optional |
240240
| max_pods_per_node | The maximum number of pods per node in this cluster | null | Optional |
241-
| strategy | The upgrade stragey to be used for upgrading the nodes. Valid values of state are: `SURGE` or `BLUE_GREEN` | "SURGE" | Optional |
241+
| strategy | The upgrade strategy to be used for upgrading the nodes. Valid values are: `SURGE`, `BLUE_GREEN`, or, for flex-start and queued provisioning, `SHORT_LIVED` | "SURGE" | Optional |
242242
| threads_per_core | Optional. The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed | null | Optional |
243243
| enable_nested_virtualization | Whether the node should have nested virtualization | null | Optional |
244244
| max_surge | The number of additional nodes that can be added to the node pool during an upgrade. Increasing max_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater. Only works with `SURGE` strategy. | 1 | Optional |

autogen/main/cluster.tf.tmpl

Lines changed: 15 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -960,20 +960,23 @@ resource "google_container_node_pool" "windows_pools" {
960960
auto_upgrade = lookup(each.value, "auto_upgrade", local.default_auto_upgrade)
961961
}
962962

963-
upgrade_settings {
964-
strategy = lookup(each.value, "strategy", "SURGE")
965-
max_surge = lookup(each.value, "strategy", "SURGE") == "SURGE" ? lookup(each.value, "max_surge", 1) : null
966-
max_unavailable = lookup(each.value, "strategy", "SURGE") == "SURGE" ? lookup(each.value, "max_unavailable", 0) : null
963+
dynamic "upgrade_settings" {
964+
for_each = contains(["SURGE", "BLUE_GREEN"], lookup(each.value, "strategy", "SURGE")) ? [1] : []
965+
content {
966+
strategy = lookup(each.value, "strategy", "SURGE")
967+
max_surge = lookup(each.value, "strategy", "SURGE") == "SURGE" ? lookup(each.value, "max_surge", 1) : null
968+
max_unavailable = lookup(each.value, "strategy", "SURGE") == "SURGE" ? lookup(each.value, "max_unavailable", 0) : null
967969

968-
dynamic "blue_green_settings" {
969-
for_each = lookup(each.value, "strategy", "SURGE") == "BLUE_GREEN" ? [1] : []
970-
content {
971-
node_pool_soak_duration = lookup(each.value, "node_pool_soak_duration", null)
970+
dynamic "blue_green_settings" {
971+
for_each = lookup(each.value, "strategy", "SURGE") == "BLUE_GREEN" ? [1] : []
972+
content {
973+
node_pool_soak_duration = lookup(each.value, "node_pool_soak_duration", null)
972974

973-
standard_rollout_policy {
974-
batch_soak_duration = lookup(each.value, "batch_soak_duration", null)
975-
batch_percentage = lookup(each.value, "batch_percentage", null)
976-
batch_node_count = lookup(each.value, "batch_node_count", null)
975+
standard_rollout_policy {
976+
batch_soak_duration = lookup(each.value, "batch_soak_duration", null)
977+
batch_percentage = lookup(each.value, "batch_percentage", null)
978+
batch_node_count = lookup(each.value, "batch_node_count", null)
979+
}
977980
}
978981
}
979982
}

cluster.tf

Lines changed: 30 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -663,20 +663,23 @@ resource "google_container_node_pool" "pools" {
663663
auto_upgrade = lookup(each.value, "auto_upgrade", local.default_auto_upgrade)
664664
}
665665

666-
upgrade_settings {
667-
strategy = lookup(each.value, "strategy", "SURGE")
668-
max_surge = lookup(each.value, "strategy", "SURGE") == "SURGE" ? lookup(each.value, "max_surge", 1) : null
669-
max_unavailable = lookup(each.value, "strategy", "SURGE") == "SURGE" ? lookup(each.value, "max_unavailable", 0) : null
666+
dynamic "upgrade_settings" {
667+
for_each = contains(["SURGE", "BLUE_GREEN"], lookup(each.value, "strategy", "SURGE")) ? [1] : []
668+
content {
669+
strategy = lookup(each.value, "strategy", "SURGE")
670+
max_surge = lookup(each.value, "strategy", "SURGE") == "SURGE" ? lookup(each.value, "max_surge", 1) : null
671+
max_unavailable = lookup(each.value, "strategy", "SURGE") == "SURGE" ? lookup(each.value, "max_unavailable", 0) : null
670672

671-
dynamic "blue_green_settings" {
672-
for_each = lookup(each.value, "strategy", "SURGE") == "BLUE_GREEN" ? [1] : []
673-
content {
674-
node_pool_soak_duration = lookup(each.value, "node_pool_soak_duration", null)
673+
dynamic "blue_green_settings" {
674+
for_each = lookup(each.value, "strategy", "SURGE") == "BLUE_GREEN" ? [1] : []
675+
content {
676+
node_pool_soak_duration = lookup(each.value, "node_pool_soak_duration", null)
675677

676-
standard_rollout_policy {
677-
batch_soak_duration = lookup(each.value, "batch_soak_duration", null)
678-
batch_percentage = lookup(each.value, "batch_percentage", null)
679-
batch_node_count = lookup(each.value, "batch_node_count", null)
678+
standard_rollout_policy {
679+
batch_soak_duration = lookup(each.value, "batch_soak_duration", null)
680+
batch_percentage = lookup(each.value, "batch_percentage", null)
681+
batch_node_count = lookup(each.value, "batch_node_count", null)
682+
}
680683
}
681684
}
682685
}
@@ -994,20 +997,23 @@ resource "google_container_node_pool" "windows_pools" {
994997
auto_upgrade = lookup(each.value, "auto_upgrade", local.default_auto_upgrade)
995998
}
996999

997-
upgrade_settings {
998-
strategy = lookup(each.value, "strategy", "SURGE")
999-
max_surge = lookup(each.value, "strategy", "SURGE") == "SURGE" ? lookup(each.value, "max_surge", 1) : null
1000-
max_unavailable = lookup(each.value, "strategy", "SURGE") == "SURGE" ? lookup(each.value, "max_unavailable", 0) : null
1000+
dynamic "upgrade_settings" {
1001+
for_each = contains(["SURGE", "BLUE_GREEN"], lookup(each.value, "strategy", "SURGE")) ? [1] : []
1002+
content {
1003+
strategy = lookup(each.value, "strategy", "SURGE")
1004+
max_surge = lookup(each.value, "strategy", "SURGE") == "SURGE" ? lookup(each.value, "max_surge", 1) : null
1005+
max_unavailable = lookup(each.value, "strategy", "SURGE") == "SURGE" ? lookup(each.value, "max_unavailable", 0) : null
10011006

1002-
dynamic "blue_green_settings" {
1003-
for_each = lookup(each.value, "strategy", "SURGE") == "BLUE_GREEN" ? [1] : []
1004-
content {
1005-
node_pool_soak_duration = lookup(each.value, "node_pool_soak_duration", null)
1007+
dynamic "blue_green_settings" {
1008+
for_each = lookup(each.value, "strategy", "SURGE") == "BLUE_GREEN" ? [1] : []
1009+
content {
1010+
node_pool_soak_duration = lookup(each.value, "node_pool_soak_duration", null)
10061011

1007-
standard_rollout_policy {
1008-
batch_soak_duration = lookup(each.value, "batch_soak_duration", null)
1009-
batch_percentage = lookup(each.value, "batch_percentage", null)
1010-
batch_node_count = lookup(each.value, "batch_node_count", null)
1012+
standard_rollout_policy {
1013+
batch_soak_duration = lookup(each.value, "batch_soak_duration", null)
1014+
batch_percentage = lookup(each.value, "batch_percentage", null)
1015+
batch_node_count = lookup(each.value, "batch_node_count", null)
1016+
}
10111017
}
10121018
}
10131019
}

examples/node_pool/main.tf

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -92,6 +92,7 @@ module "gke" {
9292
min_count = 0
9393
service_account = var.compute_engine_service_account
9494
queued_provisioning = true
95+
strategy = "SHORT_LIVED"
9596
},
9697
{
9798
name = "pool-05"
@@ -102,7 +103,7 @@ module "gke" {
102103
{
103104
name = "pool-06"
104105
node_count = 1
105-
machine_type = "n1-highmem-96"
106+
machine_type = "c2-standard-30"
106107
node_affinity = "{\"key\": \"compute.googleapis.com/node-group-name\", \"operator\": \"IN\", \"values\": [\"${google_compute_node_group.soletenant-nodes.name}\"]}"
107108
},
108109
]
@@ -170,7 +171,7 @@ resource "google_compute_node_template" "soletenant-tmpl" {
170171
name = "soletenant-tmpl-${var.cluster_name_suffix}"
171172
region = var.region
172173

173-
node_type = "n1-node-96-624"
174+
node_type = "c2-node-60-240"
174175
}
175176

176177
resource "google_compute_node_group" "soletenant-nodes" {

examples/safer_cluster_iap_bastion/bastion.tf

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -30,7 +30,7 @@ module "bastion" {
3030
name = local.bastion_name
3131
zone = local.bastion_zone
3232
image_project = "debian-cloud"
33-
machine_type = "g1-small"
33+
machine_type = "e2-small"
3434
startup_script = templatefile("${path.module}/templates/startup-script.tftpl", {})
3535
members = var.bastion_members
3636
shielded_vm = "false"

modules/beta-private-cluster-update-variant/README.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -408,7 +408,7 @@ The node_pools variable takes the following parameters:
408408
| max_count | Maximum number of nodes in the NodePool. Must be >= min_count. Cannot be used with total limits. | 100 | Optional |
409409
| total_max_count | Total maximum number of nodes in the NodePool. Must be >= min_count. Cannot be used with per zone limits. | null | Optional |
410410
| max_pods_per_node | The maximum number of pods per node in this cluster | null | Optional |
411-
| strategy | The upgrade stragey to be used for upgrading the nodes. Valid values of state are: `SURGE` or `BLUE_GREEN` | "SURGE" | Optional |
411+
| strategy | The upgrade strategy to be used for upgrading the nodes. Valid values are: `SURGE`, `BLUE_GREEN`, or, for flex-start and queued provisioning, `SHORT_LIVED` | "SURGE" | Optional |
412412
| threads_per_core | Optional. The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed | null | Optional |
413413
| enable_nested_virtualization | Whether the node should have nested virtualization | null | Optional |
414414
| max_surge | The number of additional nodes that can be added to the node pool during an upgrade. Increasing max_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater. Only works with `SURGE` strategy. | 1 | Optional |

modules/beta-private-cluster-update-variant/cluster.tf

Lines changed: 30 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -819,20 +819,23 @@ resource "google_container_node_pool" "pools" {
819819
auto_upgrade = lookup(each.value, "auto_upgrade", local.default_auto_upgrade)
820820
}
821821

822-
upgrade_settings {
823-
strategy = lookup(each.value, "strategy", "SURGE")
824-
max_surge = lookup(each.value, "strategy", "SURGE") == "SURGE" ? lookup(each.value, "max_surge", 1) : null
825-
max_unavailable = lookup(each.value, "strategy", "SURGE") == "SURGE" ? lookup(each.value, "max_unavailable", 0) : null
822+
dynamic "upgrade_settings" {
823+
for_each = contains(["SURGE", "BLUE_GREEN"], lookup(each.value, "strategy", "SURGE")) ? [1] : []
824+
content {
825+
strategy = lookup(each.value, "strategy", "SURGE")
826+
max_surge = lookup(each.value, "strategy", "SURGE") == "SURGE" ? lookup(each.value, "max_surge", 1) : null
827+
max_unavailable = lookup(each.value, "strategy", "SURGE") == "SURGE" ? lookup(each.value, "max_unavailable", 0) : null
826828

827-
dynamic "blue_green_settings" {
828-
for_each = lookup(each.value, "strategy", "SURGE") == "BLUE_GREEN" ? [1] : []
829-
content {
830-
node_pool_soak_duration = lookup(each.value, "node_pool_soak_duration", null)
829+
dynamic "blue_green_settings" {
830+
for_each = lookup(each.value, "strategy", "SURGE") == "BLUE_GREEN" ? [1] : []
831+
content {
832+
node_pool_soak_duration = lookup(each.value, "node_pool_soak_duration", null)
831833

832-
standard_rollout_policy {
833-
batch_soak_duration = lookup(each.value, "batch_soak_duration", null)
834-
batch_percentage = lookup(each.value, "batch_percentage", null)
835-
batch_node_count = lookup(each.value, "batch_node_count", null)
834+
standard_rollout_policy {
835+
batch_soak_duration = lookup(each.value, "batch_soak_duration", null)
836+
batch_percentage = lookup(each.value, "batch_percentage", null)
837+
batch_node_count = lookup(each.value, "batch_node_count", null)
838+
}
836839
}
837840
}
838841
}
@@ -1164,20 +1167,23 @@ resource "google_container_node_pool" "windows_pools" {
11641167
auto_upgrade = lookup(each.value, "auto_upgrade", local.default_auto_upgrade)
11651168
}
11661169

1167-
upgrade_settings {
1168-
strategy = lookup(each.value, "strategy", "SURGE")
1169-
max_surge = lookup(each.value, "strategy", "SURGE") == "SURGE" ? lookup(each.value, "max_surge", 1) : null
1170-
max_unavailable = lookup(each.value, "strategy", "SURGE") == "SURGE" ? lookup(each.value, "max_unavailable", 0) : null
1170+
dynamic "upgrade_settings" {
1171+
for_each = contains(["SURGE", "BLUE_GREEN"], lookup(each.value, "strategy", "SURGE")) ? [1] : []
1172+
content {
1173+
strategy = lookup(each.value, "strategy", "SURGE")
1174+
max_surge = lookup(each.value, "strategy", "SURGE") == "SURGE" ? lookup(each.value, "max_surge", 1) : null
1175+
max_unavailable = lookup(each.value, "strategy", "SURGE") == "SURGE" ? lookup(each.value, "max_unavailable", 0) : null
11711176

1172-
dynamic "blue_green_settings" {
1173-
for_each = lookup(each.value, "strategy", "SURGE") == "BLUE_GREEN" ? [1] : []
1174-
content {
1175-
node_pool_soak_duration = lookup(each.value, "node_pool_soak_duration", null)
1177+
dynamic "blue_green_settings" {
1178+
for_each = lookup(each.value, "strategy", "SURGE") == "BLUE_GREEN" ? [1] : []
1179+
content {
1180+
node_pool_soak_duration = lookup(each.value, "node_pool_soak_duration", null)
11761181

1177-
standard_rollout_policy {
1178-
batch_soak_duration = lookup(each.value, "batch_soak_duration", null)
1179-
batch_percentage = lookup(each.value, "batch_percentage", null)
1180-
batch_node_count = lookup(each.value, "batch_node_count", null)
1182+
standard_rollout_policy {
1183+
batch_soak_duration = lookup(each.value, "batch_soak_duration", null)
1184+
batch_percentage = lookup(each.value, "batch_percentage", null)
1185+
batch_node_count = lookup(each.value, "batch_node_count", null)
1186+
}
11811187
}
11821188
}
11831189
}

modules/beta-private-cluster/README.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -386,7 +386,7 @@ The node_pools variable takes the following parameters:
386386
| max_count | Maximum number of nodes in the NodePool. Must be >= min_count. Cannot be used with total limits. | 100 | Optional |
387387
| total_max_count | Total maximum number of nodes in the NodePool. Must be >= min_count. Cannot be used with per zone limits. | null | Optional |
388388
| max_pods_per_node | The maximum number of pods per node in this cluster | null | Optional |
389-
| strategy | The upgrade stragey to be used for upgrading the nodes. Valid values of state are: `SURGE` or `BLUE_GREEN` | "SURGE" | Optional |
389+
| strategy | The upgrade strategy to be used for upgrading the nodes. Valid values are: `SURGE`, `BLUE_GREEN`, or, for flex-start and queued provisioning, `SHORT_LIVED` | "SURGE" | Optional |
390390
| threads_per_core | Optional. The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed | null | Optional |
391391
| enable_nested_virtualization | Whether the node should have nested virtualization | null | Optional |
392392
| max_surge | The number of additional nodes that can be added to the node pool during an upgrade. Increasing max_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater. Only works with `SURGE` strategy. | 1 | Optional |

0 commit comments

Comments
 (0)