@@ -43,106 +43,41 @@ resource "local_file" "terraform_state" {
 
 data "google_client_config" "current" {}
 
-resource "google_container_cluster" "llvm_premerge" {
-  name     = var.cluster_name
-  location = "us-central1-a"
-
-  # We can't create a cluster with no node pool defined, but we want to only use
-  # separately managed node pools. So we create the smallest possible default
-  # node pool and immediately delete it.
-  remove_default_node_pool = true
-  initial_node_count       = 1
-
-  # Set the networking mode to VPC Native to enable IP aliasing, which is required
-  # for adding windows nodes to the cluster.
-  networking_mode = "VPC_NATIVE"
-  ip_allocation_policy {}
-}
-
-resource "google_container_node_pool" "llvm_premerge_linux_service" {
-  name       = "llvm-premerge-linux-service"
-  location   = "us-central1-a"
-  cluster    = google_container_cluster.llvm_premerge.name
-  node_count = 3
-
-  node_config {
-    machine_type = "e2-highcpu-4"
-  }
+module "premerge_cluster" {
+  source       = "./gke_cluster"
+  cluster_name = "llvm-premerge-prototype"
+  region       = "us-central1-a"
 }
 
-resource "google_container_node_pool" "llvm_premerge_linux" {
-  name               = "llvm-premerge-linux"
-  location           = "us-central1-a"
-  cluster            = google_container_cluster.llvm_premerge.name
-  initial_node_count = 0
-
-  autoscaling {
-    total_min_node_count = 0
-    total_max_node_count = 8
-  }
-
-  node_config {
-    machine_type = "n2-standard-64"
-    taint {
-      key    = "premerge-platform"
-      value  = "linux"
-      effect = "NO_SCHEDULE"
-    }
-    labels = {
-      "premerge-platform" : "linux"
-    }
-    disk_size_gb = 200
-    # Terraform wants to recreate the node pool everytime whe running
-    # terraform apply unless we explicitly set this.
-    # TODO(boomanaiden154): Look into why terraform is doing this so we do
-    # not need this hack.
-    resource_labels = {
-      "goog-gke-node-pool-provisioning-model" = "on-demand"
-    }
-  }
+# TODO(boomanaiden154): Remove these moved blocks once we have finished
+# updating everything to use the new module.
+moved {
+  from = google_container_cluster.llvm_premerge
+  to   = module.premerge_cluster.google_container_cluster.llvm_premerge
 }
 
-resource "google_container_node_pool" "llvm_premerge_windows" {
-  name               = "llvm-premerge-windows"
-  location           = "us-central1-a"
-  cluster            = google_container_cluster.llvm_premerge.name
-  initial_node_count = 0
+moved {
+  from = google_container_node_pool.llvm_premerge_linux
+  to   = module.premerge_cluster.google_container_node_pool.llvm_premerge_linux
+}
 
-  autoscaling {
-    total_min_node_count = 0
-    total_max_node_count = 16
-  }
+moved {
+  from = google_container_node_pool.llvm_premerge_linux_service
+  to   = module.premerge_cluster.google_container_node_pool.llvm_premerge_linux_service
+}
 
-  # We do not set a taint for the windows nodes as kubernetes by default sets
-  # a node.kubernetes.io/os taint for windows nodes.
-  node_config {
-    machine_type = "n2-standard-32"
-    labels = {
-      "premerge-platform" : "windows"
-    }
-    image_type = "WINDOWS_LTSC_CONTAINERD"
-    # Add a script that runs on the initial boot to disable Windows Defender.
-    # Windows Defender causes an increase in test times by approximately an
-    # order of magnitude.
-    metadata = {
-      "sysprep-specialize-script-ps1" = "Set-MpPreference -DisableRealtimeMonitoring $true"
-      # Terraform wants to recreate the node pool everytime whe running
-      # terraform apply unless we explicitly set this.
-      # TODO(boomanaiden154): Look into why terraform is doing this so we do
-      # not need this hack.
-      "disable-legacy-endpoints" = "true"
-    }
-    disk_size_gb = 200
-  }
+moved {
+  from = google_container_node_pool.llvm_premerge_windows
+  to   = module.premerge_cluster.google_container_node_pool.llvm_premerge_windows
 }
 
 provider "helm" {
   kubernetes {
-    host                   = google_container_cluster.llvm_premerge.endpoint
+    host                   = module.premerge_cluster.endpoint
     token                  = data.google_client_config.current.access_token
-    client_certificate     = base64decode(google_container_cluster.llvm_premerge.master_auth.0.client_certificate)
-    client_key             = base64decode(google_container_cluster.llvm_premerge.master_auth.0.client_key)
-    cluster_ca_certificate = base64decode(google_container_cluster.llvm_premerge.master_auth.0.cluster_ca_certificate)
+    client_certificate     = base64decode(module.premerge_cluster.client_certificate)
+    client_key             = base64decode(module.premerge_cluster.client_key)
+    cluster_ca_certificate = base64decode(module.premerge_cluster.cluster_ca_certificate)
   }
 }
 
@@ -163,10 +98,10 @@ data "google_secret_manager_secret_version" "grafana_token" {
 }
 
 provider "kubernetes" {
-  host  = "https://${google_container_cluster.llvm_premerge.endpoint}"
+  host  = "https://${module.premerge_cluster.endpoint}"
   token = data.google_client_config.current.access_token
   cluster_ca_certificate = base64decode(
-    google_container_cluster.llvm_premerge.master_auth[0].cluster_ca_certificate,
+    module.premerge_cluster.cluster_ca_certificate
   )
 }
 
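Note on the refactor: the moved blocks above tell Terraform that the cluster and node pool resources now live inside module.premerge_cluster, so terraform apply renames the existing state entries instead of destroying and recreating the running GKE infrastructure. The ./gke_cluster module itself is not part of this diff; inferred purely from how the root configuration consumes it (inputs cluster_name and region, outputs endpoint, client_certificate, client_key, and cluster_ca_certificate), its interface would look roughly like the sketch below. The variable types, descriptions, and sensitive flag are assumptions, not the actual module source.

# Assumed sketch of gke_cluster/variables.tf and gke_cluster/outputs.tf,
# reconstructed from the usage in this diff rather than the real module files.

variable "cluster_name" {
  description = "Name of the GKE cluster created by this module."
  type        = string
}

variable "region" {
  description = "Zone or region in which to create the cluster and its node pools."
  type        = string
}

# The moved blocks imply the module wraps google_container_cluster.llvm_premerge,
# so the outputs consumed by the helm and kubernetes providers would re-export
# its connection attributes.
output "endpoint" {
  value = google_container_cluster.llvm_premerge.endpoint
}

output "client_certificate" {
  value = google_container_cluster.llvm_premerge.master_auth[0].client_certificate
}

output "client_key" {
  value     = google_container_cluster.llvm_premerge.master_auth[0].client_key
  sensitive = true # assumption: the provider marks client_key as sensitive
}

output "cluster_ca_certificate" {
  value = google_container_cluster.llvm_premerge.master_auth[0].cluster_ca_certificate
}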