Skip to content

Commit 186df1b

Browse files
authored
Merge branch 'main' into huge_pages
2 parents 5a1d5e7 + ad9cbb1 commit 186df1b

File tree

61 files changed

+13485
-215
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

61 files changed

+13485
-215
lines changed

Makefile

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,7 @@
1818
# Make will use bash instead of sh
1919
SHELL := /usr/bin/env bash
2020

21-
DOCKER_TAG_VERSION_DEVELOPER_TOOLS := 1.23
21+
DOCKER_TAG_VERSION_DEVELOPER_TOOLS := 1.24
2222
DOCKER_IMAGE_DEVELOPER_TOOLS := cft/developer-tools
2323
REGISTRY_URL := gcr.io/cloud-foundation-cicd
2424
DOCKER_BIN ?= docker
@@ -70,6 +70,7 @@ docker_test_integration:
7070
docker_test_lint:
7171
$(DOCKER_BIN) run --rm -it \
7272
-e ENABLE_PARALLEL=1 \
73+
-e ENABLE_BPMETADATA=1 \
7374
-v "$(CURDIR)":/workspace \
7475
$(REGISTRY_URL)/${DOCKER_IMAGE_DEVELOPER_TOOLS}:${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} \
7576
/usr/local/bin/test_lint.sh
@@ -78,9 +79,10 @@ docker_test_lint:
7879
.PHONY: docker_generate_docs
7980
docker_generate_docs:
8081
$(DOCKER_BIN) run --rm -it \
82+
-e ENABLE_BPMETADATA=1 \
8183
-v "$(CURDIR)":/workspace \
8284
$(REGISTRY_URL)/${DOCKER_IMAGE_DEVELOPER_TOOLS}:${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} \
83-
/bin/bash -c 'source /usr/local/bin/task_helper_functions.sh && generate_docs'
85+
/bin/bash -c 'source /usr/local/bin/task_helper_functions.sh && generate_docs display'
8486

8587
# Generate files from autogen
8688
.PHONY: docker_generate_modules

README.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -355,7 +355,7 @@ The node_pools variable takes the following parameters:
355355
| max_count | Maximum number of nodes in the NodePool. Must be >= min_count. Cannot be used with total limits. | 100 | Optional |
356356
| total_max_count | Total maximum number of nodes in the NodePool. Must be >= min_count. Cannot be used with per zone limits. | null | Optional |
357357
| max_pods_per_node | The maximum number of pods per node in this cluster | null | Optional |
358-
| strategy | The upgrade stragey to be used for upgrading the nodes. Valid values of state are: `SURGE` or `BLUE_GREEN` | "SURGE" | Optional |
358+
| strategy | The upgrade strategy to be used for upgrading the nodes. Valid values are: `SURGE`, `BLUE_GREEN`, or, for flex-start and queued provisioning, `SHORT_LIVED` | "SURGE" | Optional |
359359
| threads_per_core | Optional The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed | null | Optional |
360360
| enable_nested_virtualization | Whether the node should have nested virtualization | null | Optional |
361361
| max_surge | The number of additional nodes that can be added to the node pool during an upgrade. Increasing max_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater. Only works with `SURGE` strategy. | 1 | Optional |

autogen/main/README.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -238,7 +238,7 @@ The node_pools variable takes the following parameters:
238238
| max_count | Maximum number of nodes in the NodePool. Must be >= min_count. Cannot be used with total limits. | 100 | Optional |
239239
| total_max_count | Total maximum number of nodes in the NodePool. Must be >= min_count. Cannot be used with per zone limits. | null | Optional |
240240
| max_pods_per_node | The maximum number of pods per node in this cluster | null | Optional |
241-
| strategy | The upgrade stragey to be used for upgrading the nodes. Valid values of state are: `SURGE` or `BLUE_GREEN` | "SURGE" | Optional |
241+
| strategy | The upgrade strategy to be used for upgrading the nodes. Valid values are: `SURGE`, `BLUE_GREEN`, or, for flex-start and queued provisioning, `SHORT_LIVED` | "SURGE" | Optional |
242242
| threads_per_core | Optional The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed | null | Optional |
243243
| enable_nested_virtualization | Whether the node should have nested virtualization | null | Optional |
244244
| max_surge | The number of additional nodes that can be added to the node pool during an upgrade. Increasing max_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater. Only works with `SURGE` strategy. | 1 | Optional |

autogen/main/cluster.tf.tmpl

Lines changed: 15 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -960,20 +960,23 @@ resource "google_container_node_pool" "windows_pools" {
960960
auto_upgrade = lookup(each.value, "auto_upgrade", local.default_auto_upgrade)
961961
}
962962

963-
upgrade_settings {
964-
strategy = lookup(each.value, "strategy", "SURGE")
965-
max_surge = lookup(each.value, "strategy", "SURGE") == "SURGE" ? lookup(each.value, "max_surge", 1) : null
966-
max_unavailable = lookup(each.value, "strategy", "SURGE") == "SURGE" ? lookup(each.value, "max_unavailable", 0) : null
963+
dynamic "upgrade_settings" {
964+
for_each = contains(["SURGE", "BLUE_GREEN"], lookup(each.value, "strategy", "SURGE")) ? [1] : []
965+
content {
966+
strategy = lookup(each.value, "strategy", "SURGE")
967+
max_surge = lookup(each.value, "strategy", "SURGE") == "SURGE" ? lookup(each.value, "max_surge", 1) : null
968+
max_unavailable = lookup(each.value, "strategy", "SURGE") == "SURGE" ? lookup(each.value, "max_unavailable", 0) : null
967969

968-
dynamic "blue_green_settings" {
969-
for_each = lookup(each.value, "strategy", "SURGE") == "BLUE_GREEN" ? [1] : []
970-
content {
971-
node_pool_soak_duration = lookup(each.value, "node_pool_soak_duration", null)
970+
dynamic "blue_green_settings" {
971+
for_each = lookup(each.value, "strategy", "SURGE") == "BLUE_GREEN" ? [1] : []
972+
content {
973+
node_pool_soak_duration = lookup(each.value, "node_pool_soak_duration", null)
972974

973-
standard_rollout_policy {
974-
batch_soak_duration = lookup(each.value, "batch_soak_duration", null)
975-
batch_percentage = lookup(each.value, "batch_percentage", null)
976-
batch_node_count = lookup(each.value, "batch_node_count", null)
975+
standard_rollout_policy {
976+
batch_soak_duration = lookup(each.value, "batch_soak_duration", null)
977+
batch_percentage = lookup(each.value, "batch_percentage", null)
978+
batch_node_count = lookup(each.value, "batch_node_count", null)
979+
}
977980
}
978981
}
979982
}

build/int.cloudbuild.yaml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -486,6 +486,6 @@ tags:
486486
- 'integration'
487487
substitutions:
488488
_DOCKER_IMAGE_DEVELOPER_TOOLS: 'cft/developer-tools'
489-
_DOCKER_TAG_VERSION_DEVELOPER_TOOLS: '1.23'
489+
_DOCKER_TAG_VERSION_DEVELOPER_TOOLS: '1.24'
490490
options:
491491
machineType: 'E2_HIGHCPU_8'

cluster.tf

Lines changed: 30 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -663,20 +663,23 @@ resource "google_container_node_pool" "pools" {
663663
auto_upgrade = lookup(each.value, "auto_upgrade", local.default_auto_upgrade)
664664
}
665665

666-
upgrade_settings {
667-
strategy = lookup(each.value, "strategy", "SURGE")
668-
max_surge = lookup(each.value, "strategy", "SURGE") == "SURGE" ? lookup(each.value, "max_surge", 1) : null
669-
max_unavailable = lookup(each.value, "strategy", "SURGE") == "SURGE" ? lookup(each.value, "max_unavailable", 0) : null
666+
dynamic "upgrade_settings" {
667+
for_each = contains(["SURGE", "BLUE_GREEN"], lookup(each.value, "strategy", "SURGE")) ? [1] : []
668+
content {
669+
strategy = lookup(each.value, "strategy", "SURGE")
670+
max_surge = lookup(each.value, "strategy", "SURGE") == "SURGE" ? lookup(each.value, "max_surge", 1) : null
671+
max_unavailable = lookup(each.value, "strategy", "SURGE") == "SURGE" ? lookup(each.value, "max_unavailable", 0) : null
670672

671-
dynamic "blue_green_settings" {
672-
for_each = lookup(each.value, "strategy", "SURGE") == "BLUE_GREEN" ? [1] : []
673-
content {
674-
node_pool_soak_duration = lookup(each.value, "node_pool_soak_duration", null)
673+
dynamic "blue_green_settings" {
674+
for_each = lookup(each.value, "strategy", "SURGE") == "BLUE_GREEN" ? [1] : []
675+
content {
676+
node_pool_soak_duration = lookup(each.value, "node_pool_soak_duration", null)
675677

676-
standard_rollout_policy {
677-
batch_soak_duration = lookup(each.value, "batch_soak_duration", null)
678-
batch_percentage = lookup(each.value, "batch_percentage", null)
679-
batch_node_count = lookup(each.value, "batch_node_count", null)
678+
standard_rollout_policy {
679+
batch_soak_duration = lookup(each.value, "batch_soak_duration", null)
680+
batch_percentage = lookup(each.value, "batch_percentage", null)
681+
batch_node_count = lookup(each.value, "batch_node_count", null)
682+
}
680683
}
681684
}
682685
}
@@ -1011,20 +1014,23 @@ resource "google_container_node_pool" "windows_pools" {
10111014
auto_upgrade = lookup(each.value, "auto_upgrade", local.default_auto_upgrade)
10121015
}
10131016

1014-
upgrade_settings {
1015-
strategy = lookup(each.value, "strategy", "SURGE")
1016-
max_surge = lookup(each.value, "strategy", "SURGE") == "SURGE" ? lookup(each.value, "max_surge", 1) : null
1017-
max_unavailable = lookup(each.value, "strategy", "SURGE") == "SURGE" ? lookup(each.value, "max_unavailable", 0) : null
1017+
dynamic "upgrade_settings" {
1018+
for_each = contains(["SURGE", "BLUE_GREEN"], lookup(each.value, "strategy", "SURGE")) ? [1] : []
1019+
content {
1020+
strategy = lookup(each.value, "strategy", "SURGE")
1021+
max_surge = lookup(each.value, "strategy", "SURGE") == "SURGE" ? lookup(each.value, "max_surge", 1) : null
1022+
max_unavailable = lookup(each.value, "strategy", "SURGE") == "SURGE" ? lookup(each.value, "max_unavailable", 0) : null
10181023

1019-
dynamic "blue_green_settings" {
1020-
for_each = lookup(each.value, "strategy", "SURGE") == "BLUE_GREEN" ? [1] : []
1021-
content {
1022-
node_pool_soak_duration = lookup(each.value, "node_pool_soak_duration", null)
1024+
dynamic "blue_green_settings" {
1025+
for_each = lookup(each.value, "strategy", "SURGE") == "BLUE_GREEN" ? [1] : []
1026+
content {
1027+
node_pool_soak_duration = lookup(each.value, "node_pool_soak_duration", null)
10231028

1024-
standard_rollout_policy {
1025-
batch_soak_duration = lookup(each.value, "batch_soak_duration", null)
1026-
batch_percentage = lookup(each.value, "batch_percentage", null)
1027-
batch_node_count = lookup(each.value, "batch_node_count", null)
1029+
standard_rollout_policy {
1030+
batch_soak_duration = lookup(each.value, "batch_soak_duration", null)
1031+
batch_percentage = lookup(each.value, "batch_percentage", null)
1032+
batch_node_count = lookup(each.value, "batch_node_count", null)
1033+
}
10281034
}
10291035
}
10301036
}

examples/node_pool/main.tf

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -92,6 +92,7 @@ module "gke" {
9292
min_count = 0
9393
service_account = var.compute_engine_service_account
9494
queued_provisioning = true
95+
strategy = "SHORT_LIVED"
9596
},
9697
{
9798
name = "pool-05"
@@ -103,7 +104,7 @@ module "gke" {
103104
{
104105
name = "pool-06"
105106
node_count = 1
106-
machine_type = "n1-highmem-96"
107+
machine_type = "c2-standard-30"
107108
node_affinity = "{\"key\": \"compute.googleapis.com/node-group-name\", \"operator\": \"IN\", \"values\": [\"${google_compute_node_group.soletenant-nodes.name}\"]}"
108109
},
109110
]
@@ -180,7 +181,7 @@ resource "google_compute_node_template" "soletenant-tmpl" {
180181
name = "soletenant-tmpl-${var.cluster_name_suffix}"
181182
region = var.region
182183

183-
node_type = "n1-node-96-624"
184+
node_type = "c2-node-60-240"
184185
}
185186

186187
resource "google_compute_node_group" "soletenant-nodes" {

examples/safer_cluster_iap_bastion/bastion.tf

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -30,7 +30,7 @@ module "bastion" {
3030
name = local.bastion_name
3131
zone = local.bastion_zone
3232
image_project = "debian-cloud"
33-
machine_type = "g1-small"
33+
machine_type = "e2-small"
3434
startup_script = templatefile("${path.module}/templates/startup-script.tftpl", {})
3535
members = var.bastion_members
3636
shielded_vm = "false"

examples/simple_autopilot_private/main.tf

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -50,8 +50,7 @@ module "gke" {
5050
enable_private_endpoint = true
5151
enable_private_nodes = true
5252
network_tags = [local.cluster_type]
53-
# TODO: b/413643369
54-
# node_pools_cgroup_mode = "CGROUP_MODE_V2"
53+
node_pools_cgroup_mode = "CGROUP_MODE_V2"
5554
deletion_protection = false
5655
insecure_kubelet_readonly_port_enabled = false
5756
}

examples/simple_autopilot_private_cmek/main.tf

Lines changed: 4 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -71,9 +71,8 @@ module "gke" {
7171
enable_private_endpoint = true
7272
enable_private_nodes = true
7373
network_tags = [local.cluster_type]
74-
# TODO: b/413643369
75-
# node_pools_cgroup_mode = "CGROUP_MODE_V2"
76-
deletion_protection = false
77-
boot_disk_kms_key = values(module.kms.keys)[0]
78-
depends_on = [google_kms_crypto_key_iam_member.main]
74+
node_pools_cgroup_mode = "CGROUP_MODE_V2"
75+
deletion_protection = false
76+
boot_disk_kms_key = values(module.kms.keys)[0]
77+
depends_on = [google_kms_crypto_key_iam_member.main]
7978
}

0 commit comments

Comments
 (0)