Commit 1366981

update node
1 parent d195b87 commit 1366981

3 files changed: 5 additions (+), 6 deletions (-)

autogen/main/README.md

Lines changed: 1 addition & 1 deletion
@@ -238,7 +238,7 @@ The node_pools variable takes the following parameters:
 | max_count | Maximum number of nodes in the NodePool. Must be >= min_count. Cannot be used with total limits. | 100 | Optional |
 | total_max_count | Total maximum number of nodes in the NodePool. Must be >= min_count. Cannot be used with per zone limits. | null | Optional |
 | max_pods_per_node | The maximum number of pods per node in this cluster | null | Optional |
-| strategy | The upgrade strategy to be used for upgrading the nodes. Valid values are: `SURGE` or `BLUE_GREEN` | "SURGE" | Optional |
+| strategy | The upgrade strategy to be used for upgrading the nodes. Valid values are: `SURGE`, `BLUE_GREEN`, or, for flex-start and queued provisioning only, `SHORT_LIVED` | "SURGE" | Optional |
 | threads_per_core | Optional The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed | null | Optional |
 | enable_nested_virtualization | Whether the node should have nested virtualization | null | Optional |
 | max_surge | The number of additional nodes that can be added to the node pool during an upgrade. Increasing max_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater. Only works with `SURGE` strategy. | 1 | Optional |
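
For reference, a minimal sketch of a node_pools entry using the newly documented SHORT_LIVED value; it assumes the module's node_pools list format and mirrors the pool-04 change in examples/node_pool/main.tf below, so the name and values are illustrative rather than prescriptive:

  node_pools = [
    {
      name                = "pool-04"                          # illustrative; mirrors the example change below
      min_count           = 0
      service_account     = var.compute_engine_service_account # assumes this variable exists, as in the example
      queued_provisioning = true                               # SHORT_LIVED applies to flex-start and queued provisioning only
      max_surge           = 0                                  # set to 0 alongside SHORT_LIVED, as in the example change
      strategy            = "SHORT_LIVED"                      # new value alongside SURGE and BLUE_GREEN
    },
  ]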

examples/node_pool/main.tf

Lines changed: 4 additions & 2 deletions
@@ -92,6 +92,8 @@ module "gke" {
       min_count           = 0
       service_account     = var.compute_engine_service_account
       queued_provisioning = true
+      max_surge           = 0
+      strategy            = "SHORT_LIVED"
     },
     {
       name = "pool-05"
@@ -102,7 +104,7 @@ module "gke" {
     {
       name          = "pool-06"
       node_count    = 1
-      machine_type  = "n1-highmem-96"
+      machine_type  = "c2-standard-30"
       node_affinity = "{\"key\": \"compute.googleapis.com/node-group-name\", \"operator\": \"IN\", \"values\": [\"${google_compute_node_group.soletenant-nodes.name}\"]}"
     },
   ]
@@ -170,7 +172,7 @@ resource "google_compute_node_template" "soletenant-tmpl" {
   name   = "soletenant-tmpl-${var.cluster_name_suffix}"
   region = var.region

-  node_type = "n1-node-96-624"
+  node_type = "c2-node-60-240"
 }

 resource "google_compute_node_group" "soletenant-nodes" {

test/integration/testutils/utils.go

Lines changed: 0 additions & 3 deletions
@@ -94,13 +94,10 @@ func TGKEAssertGolden(assert *assert.Assertions, golden *golden.GoldenFile, clus
 	})

 	// Cluster assertions
-	fmt.Sprintf("DEBUG: Cluster assertions")
 	golden.JSONPathEqs(assert, *clusterJson, clusterCheckPaths)

 	// NodePool assertions
-	fmt.Sprintf("DEBUG: NodePool assertions")
 	for _, nodePool := range nodePools {
-		fmt.Sprintf("DEBUG: NodePool assertions for %s", nodePool)
 		assert.Truef(clusterJson.Get(fmt.Sprintf("nodePools.#(name==%s).name", nodePool)).Exists(), "NodePool not found: %s", nodePool)

 		nodeCheckPaths := utils.GetTerminalJSONPaths(golden.GetJSON().Get(fmt.Sprintf("nodePools.#(name==%s)", nodePool)))

0 commit comments
