# extra_node_pool.tf
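# State migration: this resource was previously addressed as
# azurerm_kubernetes_cluster_node_pool.node_pool. The `moved` block lets
# existing state adopt the new address without destroying the node pools.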
moved {
from = azurerm_kubernetes_cluster_node_pool.node_pool
to = azurerm_kubernetes_cluster_node_pool.node_pool_create_before_destroy
}
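# Node pools replaced with create-before-destroy semantics.
# `local.node_pools_create_before_destroy` is presumably a filtered subset of
# `var.node_pools` built elsewhere in the module. Each pool name gets a
# 4-character random suffix (from md5(uuid())) so the replacement pool can
# coexist with the pool it supersedes; `ignore_changes = [name]` keeps the
# suffix stable across plans, and `replace_triggered_by` forces a replacement
# only when the logical pool name changes (tracked by
# null_resource.pool_name_keeper below).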
resource "azurerm_kubernetes_cluster_node_pool" "node_pool_create_before_destroy" {
for_each = local.node_pools_create_before_destroy
kubernetes_cluster_id = azurerm_kubernetes_cluster.main.id
name = "${each.value.name}${substr(md5(uuid()), 0, 4)}"
auto_scaling_enabled = each.value.auto_scaling_enabled
capacity_reservation_group_id = each.value.capacity_reservation_group_id
eviction_policy = each.value.eviction_policy
fips_enabled = each.value.fips_enabled
gpu_instance = each.value.gpu_instance
gpu_driver = each.value.gpu_driver
host_encryption_enabled = each.value.host_encryption_enabled
host_group_id = each.value.host_group_id
kubelet_disk_type = each.value.kubelet_disk_type
max_count = each.value.max_count
max_pods = each.value.max_pods
min_count = each.value.min_count
mode = each.value.mode
node_count = each.value.node_count
node_labels = each.value.node_labels
node_public_ip_enabled = each.value.node_public_ip_enabled
node_public_ip_prefix_id = each.value.node_public_ip_prefix_id
node_taints = each.value.node_taints
orchestrator_version = each.value.orchestrator_version
os_disk_size_gb = each.value.os_disk_size_gb
os_disk_type = each.value.os_disk_type
os_sku = each.value.os_sku
os_type = each.value.os_type
pod_subnet_id = try(each.value.pod_subnet.id, null)
priority = each.value.priority
proximity_placement_group_id = each.value.proximity_placement_group_id
scale_down_mode = each.value.scale_down_mode
snapshot_id = each.value.snapshot_id
spot_max_price = each.value.spot_max_price
tags = each.value.tags
temporary_name_for_rotation = each.value.temporary_name_for_rotation
ultra_ssd_enabled = each.value.ultra_ssd_enabled
vm_size = each.value.vm_size
vnet_subnet_id = try(each.value.vnet_subnet.id, null)
workload_runtime = each.value.workload_runtime
zones = each.value.zones
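# Optional nested blocks: each `dynamic` below renders at most one block, and
# only when the corresponding attribute is non-null.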
dynamic "kubelet_config" {
for_each = each.value.kubelet_config == null ? [] : ["kubelet_config"]
content {
allowed_unsafe_sysctls = each.value.kubelet_config.allowed_unsafe_sysctls
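# Note: the provider argument is named `container_log_max_line` but is
# populated from the module's `container_log_max_files` input; the mismatch
# mirrors the azurerm provider schema, not a typo.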
container_log_max_line = each.value.kubelet_config.container_log_max_files
container_log_max_size_mb = each.value.kubelet_config.container_log_max_size_mb
cpu_cfs_quota_enabled = each.value.kubelet_config.cpu_cfs_quota_enabled
cpu_cfs_quota_period = each.value.kubelet_config.cpu_cfs_quota_period
cpu_manager_policy = each.value.kubelet_config.cpu_manager_policy
image_gc_high_threshold = each.value.kubelet_config.image_gc_high_threshold
image_gc_low_threshold = each.value.kubelet_config.image_gc_low_threshold
pod_max_pid = each.value.kubelet_config.pod_max_pid
topology_manager_policy = each.value.kubelet_config.topology_manager_policy
}
}
dynamic "linux_os_config" {
for_each = each.value.linux_os_config == null ? [] : ["linux_os_config"]
content {
swap_file_size_mb = each.value.linux_os_config.swap_file_size_mb
transparent_huge_page_defrag = each.value.linux_os_config.transparent_huge_page_defrag
transparent_huge_page_enabled = each.value.linux_os_config.transparent_huge_page_enabled
dynamic "sysctl_config" {
for_each = each.value.linux_os_config.sysctl_config == null ? [] : ["sysctl_config"]
content {
fs_aio_max_nr = each.value.linux_os_config.sysctl_config.fs_aio_max_nr
fs_file_max = each.value.linux_os_config.sysctl_config.fs_file_max
fs_inotify_max_user_watches = each.value.linux_os_config.sysctl_config.fs_inotify_max_user_watches
fs_nr_open = each.value.linux_os_config.sysctl_config.fs_nr_open
kernel_threads_max = each.value.linux_os_config.sysctl_config.kernel_threads_max
net_core_netdev_max_backlog = each.value.linux_os_config.sysctl_config.net_core_netdev_max_backlog
net_core_optmem_max = each.value.linux_os_config.sysctl_config.net_core_optmem_max
net_core_rmem_default = each.value.linux_os_config.sysctl_config.net_core_rmem_default
net_core_rmem_max = each.value.linux_os_config.sysctl_config.net_core_rmem_max
net_core_somaxconn = each.value.linux_os_config.sysctl_config.net_core_somaxconn
net_core_wmem_default = each.value.linux_os_config.sysctl_config.net_core_wmem_default
net_core_wmem_max = each.value.linux_os_config.sysctl_config.net_core_wmem_max
net_ipv4_ip_local_port_range_max = each.value.linux_os_config.sysctl_config.net_ipv4_ip_local_port_range_max
net_ipv4_ip_local_port_range_min = each.value.linux_os_config.sysctl_config.net_ipv4_ip_local_port_range_min
net_ipv4_neigh_default_gc_thresh1 = each.value.linux_os_config.sysctl_config.net_ipv4_neigh_default_gc_thresh1
net_ipv4_neigh_default_gc_thresh2 = each.value.linux_os_config.sysctl_config.net_ipv4_neigh_default_gc_thresh2
net_ipv4_neigh_default_gc_thresh3 = each.value.linux_os_config.sysctl_config.net_ipv4_neigh_default_gc_thresh3
net_ipv4_tcp_fin_timeout = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_fin_timeout
net_ipv4_tcp_keepalive_intvl = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_keepalive_intvl
net_ipv4_tcp_keepalive_probes = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_keepalive_probes
net_ipv4_tcp_keepalive_time = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_keepalive_time
net_ipv4_tcp_max_syn_backlog = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_max_syn_backlog
net_ipv4_tcp_max_tw_buckets = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_max_tw_buckets
net_ipv4_tcp_tw_reuse = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_tw_reuse
net_netfilter_nf_conntrack_buckets = each.value.linux_os_config.sysctl_config.net_netfilter_nf_conntrack_buckets
net_netfilter_nf_conntrack_max = each.value.linux_os_config.sysctl_config.net_netfilter_nf_conntrack_max
vm_max_map_count = each.value.linux_os_config.sysctl_config.vm_max_map_count
vm_swappiness = each.value.linux_os_config.sysctl_config.vm_swappiness
vm_vfs_cache_pressure = each.value.linux_os_config.sysctl_config.vm_vfs_cache_pressure
}
}
}
}
dynamic "node_network_profile" {
for_each = each.value.node_network_profile == null ? [] : ["node_network_profile"]
content {
application_security_group_ids = each.value.node_network_profile.application_security_group_ids
node_public_ip_tags = each.value.node_network_profile.node_public_ip_tags
dynamic "allowed_host_ports" {
for_each = each.value.node_network_profile.allowed_host_ports == null ? [] : each.value.node_network_profile.allowed_host_ports
content {
port_end = allowed_host_ports.value.port_end
port_start = allowed_host_ports.value.port_start
protocol = allowed_host_ports.value.protocol
}
}
}
}
dynamic "upgrade_settings" {
for_each = each.value.upgrade_settings == null ? [] : ["upgrade_settings"]
content {
max_surge = each.value.upgrade_settings.max_surge
drain_timeout_in_minutes = each.value.upgrade_settings.drain_timeout_in_minutes
node_soak_duration_in_minutes = each.value.upgrade_settings.node_soak_duration_in_minutes
}
}
dynamic "windows_profile" {
for_each = each.value.windows_profile == null ? [] : ["windows_profile"]
content {
outbound_nat_enabled = each.value.windows_profile.outbound_nat_enabled
}
}
depends_on = [azapi_update_resource.aks_cluster_post_create]
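# Replacement strategy: provision the new pool before the old one is drained
# and removed. The random name suffix is ignored after creation; only a change
# to the logical pool name (via the keeper resource) triggers replacement.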
lifecycle {
create_before_destroy = true
ignore_changes = [
name
]
replace_triggered_by = [
null_resource.pool_name_keeper[each.key],
]
precondition {
condition = can(regex("^[a-z0-9]{1,8}$", each.value.name))
error_message = "A node pool name must consist of lowercase alphanumeric characters and be at most 8 characters long (4 random characters are appended)."
}
precondition {
condition = var.network_plugin_mode != "overlay" || !can(regex("^Standard_DC[0-9]+s?_v2$", each.value.vm_size))
error_message = "With with Azure CNI Overlay you can't use DCsv2-series virtual machines in node pools. "
}
precondition {
condition = var.agents_type == "VirtualMachineScaleSets"
error_message = "Multiple Node Pools are only supported when the Kubernetes Cluster is using Virtual Machine Scale Sets."
}
precondition {
condition = each.value.auto_scaling_enabled == true || try(each.value.node_count, 0) >= 1
error_message = "`node_count` must be set to a value >= 1 when `auto_scaling_enabled` is `false` for node pool '${each.value.name}'."
}
precondition {
condition = var.node_provisioning_profile == null || try(var.node_provisioning_profile.mode, null) != "Auto" || each.value.auto_scaling_enabled != true
error_message = "`auto_scaling_enabled` must be `false` on all node pools when `node_provisioning_profile.mode` is set to `Auto`."
}
}
}
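# Node pools replaced with default (destroy-then-create) semantics. These use
# the stable pool name directly; `temporary_name_for_rotation` lets the
# provider rotate a pool in place when a change requires recreation.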
resource "azurerm_kubernetes_cluster_node_pool" "node_pool_create_after_destroy" {
for_each = local.node_pools_create_after_destroy
kubernetes_cluster_id = azurerm_kubernetes_cluster.main.id
name = each.value.name
auto_scaling_enabled = each.value.auto_scaling_enabled
capacity_reservation_group_id = each.value.capacity_reservation_group_id
eviction_policy = each.value.eviction_policy
fips_enabled = each.value.fips_enabled
gpu_instance = each.value.gpu_instance
gpu_driver = each.value.gpu_driver
host_encryption_enabled = each.value.host_encryption_enabled
host_group_id = each.value.host_group_id
kubelet_disk_type = each.value.kubelet_disk_type
max_count = each.value.max_count
max_pods = each.value.max_pods
min_count = each.value.min_count
mode = each.value.mode
node_count = each.value.node_count
node_labels = each.value.node_labels
node_public_ip_enabled = each.value.node_public_ip_enabled
node_public_ip_prefix_id = each.value.node_public_ip_prefix_id
node_taints = each.value.node_taints
orchestrator_version = each.value.orchestrator_version
os_disk_size_gb = each.value.os_disk_size_gb
os_disk_type = each.value.os_disk_type
os_sku = each.value.os_sku
os_type = each.value.os_type
pod_subnet_id = try(each.value.pod_subnet.id, null)
priority = each.value.priority
proximity_placement_group_id = each.value.proximity_placement_group_id
scale_down_mode = each.value.scale_down_mode
snapshot_id = each.value.snapshot_id
spot_max_price = each.value.spot_max_price
tags = each.value.tags
temporary_name_for_rotation = each.value.temporary_name_for_rotation
ultra_ssd_enabled = each.value.ultra_ssd_enabled
vm_size = each.value.vm_size
vnet_subnet_id = try(each.value.vnet_subnet.id, null)
workload_runtime = each.value.workload_runtime
zones = each.value.zones
dynamic "kubelet_config" {
for_each = each.value.kubelet_config == null ? [] : ["kubelet_config"]
content {
allowed_unsafe_sysctls = each.value.kubelet_config.allowed_unsafe_sysctls
container_log_max_line = each.value.kubelet_config.container_log_max_files
container_log_max_size_mb = each.value.kubelet_config.container_log_max_size_mb
cpu_cfs_quota_enabled = each.value.kubelet_config.cpu_cfs_quota_enabled
cpu_cfs_quota_period = each.value.kubelet_config.cpu_cfs_quota_period
cpu_manager_policy = each.value.kubelet_config.cpu_manager_policy
image_gc_high_threshold = each.value.kubelet_config.image_gc_high_threshold
image_gc_low_threshold = each.value.kubelet_config.image_gc_low_threshold
pod_max_pid = each.value.kubelet_config.pod_max_pid
topology_manager_policy = each.value.kubelet_config.topology_manager_policy
}
}
dynamic "linux_os_config" {
for_each = each.value.linux_os_config == null ? [] : ["linux_os_config"]
content {
swap_file_size_mb = each.value.linux_os_config.swap_file_size_mb
transparent_huge_page_defrag = each.value.linux_os_config.transparent_huge_page_defrag
transparent_huge_page_enabled = each.value.linux_os_config.transparent_huge_page_enabled
dynamic "sysctl_config" {
for_each = each.value.linux_os_config.sysctl_config == null ? [] : ["sysctl_config"]
content {
fs_aio_max_nr = each.value.linux_os_config.sysctl_config.fs_aio_max_nr
fs_file_max = each.value.linux_os_config.sysctl_config.fs_file_max
fs_inotify_max_user_watches = each.value.linux_os_config.sysctl_config.fs_inotify_max_user_watches
fs_nr_open = each.value.linux_os_config.sysctl_config.fs_nr_open
kernel_threads_max = each.value.linux_os_config.sysctl_config.kernel_threads_max
net_core_netdev_max_backlog = each.value.linux_os_config.sysctl_config.net_core_netdev_max_backlog
net_core_optmem_max = each.value.linux_os_config.sysctl_config.net_core_optmem_max
net_core_rmem_default = each.value.linux_os_config.sysctl_config.net_core_rmem_default
net_core_rmem_max = each.value.linux_os_config.sysctl_config.net_core_rmem_max
net_core_somaxconn = each.value.linux_os_config.sysctl_config.net_core_somaxconn
net_core_wmem_default = each.value.linux_os_config.sysctl_config.net_core_wmem_default
net_core_wmem_max = each.value.linux_os_config.sysctl_config.net_core_wmem_max
net_ipv4_ip_local_port_range_max = each.value.linux_os_config.sysctl_config.net_ipv4_ip_local_port_range_max
net_ipv4_ip_local_port_range_min = each.value.linux_os_config.sysctl_config.net_ipv4_ip_local_port_range_min
net_ipv4_neigh_default_gc_thresh1 = each.value.linux_os_config.sysctl_config.net_ipv4_neigh_default_gc_thresh1
net_ipv4_neigh_default_gc_thresh2 = each.value.linux_os_config.sysctl_config.net_ipv4_neigh_default_gc_thresh2
net_ipv4_neigh_default_gc_thresh3 = each.value.linux_os_config.sysctl_config.net_ipv4_neigh_default_gc_thresh3
net_ipv4_tcp_fin_timeout = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_fin_timeout
net_ipv4_tcp_keepalive_intvl = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_keepalive_intvl
net_ipv4_tcp_keepalive_probes = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_keepalive_probes
net_ipv4_tcp_keepalive_time = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_keepalive_time
net_ipv4_tcp_max_syn_backlog = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_max_syn_backlog
net_ipv4_tcp_max_tw_buckets = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_max_tw_buckets
net_ipv4_tcp_tw_reuse = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_tw_reuse
net_netfilter_nf_conntrack_buckets = each.value.linux_os_config.sysctl_config.net_netfilter_nf_conntrack_buckets
net_netfilter_nf_conntrack_max = each.value.linux_os_config.sysctl_config.net_netfilter_nf_conntrack_max
vm_max_map_count = each.value.linux_os_config.sysctl_config.vm_max_map_count
vm_swappiness = each.value.linux_os_config.sysctl_config.vm_swappiness
vm_vfs_cache_pressure = each.value.linux_os_config.sysctl_config.vm_vfs_cache_pressure
}
}
}
}
dynamic "node_network_profile" {
for_each = each.value.node_network_profile == null ? [] : ["node_network_profile"]
content {
node_public_ip_tags = each.value.node_network_profile.node_public_ip_tags
}
}
dynamic "upgrade_settings" {
for_each = each.value.upgrade_settings == null ? [] : ["upgrade_settings"]
content {
max_surge = each.value.upgrade_settings.max_surge
drain_timeout_in_minutes = each.value.upgrade_settings.drain_timeout_in_minutes
node_soak_duration_in_minutes = each.value.upgrade_settings.node_soak_duration_in_minutes
}
}
dynamic "windows_profile" {
for_each = each.value.windows_profile == null ? [] : ["windows_profile"]
content {
outbound_nat_enabled = each.value.windows_profile.outbound_nat_enabled
}
}
depends_on = [azapi_update_resource.aks_cluster_post_create]
lifecycle {
precondition {
condition = can(regex("^[a-z0-9]{1,8}$", each.value.name))
error_message = "A node pool name must consist of lowercase alphanumeric characters and be at most 8 characters long."
}
precondition {
condition = var.network_plugin_mode != "overlay" || !can(regex("^Standard_DC[0-9]+s?_v2$", each.value.vm_size))
error_message = "With with Azure CNI Overlay you can't use DCsv2-series virtual machines in node pools. "
}
precondition {
condition = var.agents_type == "VirtualMachineScaleSets"
error_message = "Multiple Node Pools are only supported when the Kubernetes Cluster is using Virtual Machine Scale Sets."
}
precondition {
condition = each.value.auto_scaling_enabled == true || try(each.value.node_count, 0) >= 1
error_message = "`node_count` must be set to a value >= 1 when `auto_scaling_enabled` is `false` for node pool '${each.value.name}'."
}
precondition {
condition = var.node_provisioning_profile == null || try(var.node_provisioning_profile.mode, null) != "Auto" || each.value.auto_scaling_enabled != true
error_message = "`auto_scaling_enabled` must be `false` on all node pools when `node_provisioning_profile.mode` is set to `Auto`."
}
}
}
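# Keeper that tracks each pool's logical name. The create-before-destroy pools
# reference it via `replace_triggered_by`, so renaming a pool in
# `var.node_pools` replaces the corresponding node pool resource.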
resource "null_resource" "pool_name_keeper" {
for_each = var.node_pools
triggers = {
pool_name = each.value.name
}
lifecycle {
precondition {
condition = !var.create_role_assignment_network_contributor || length(distinct(local.subnet_ids)) == length(local.subnet_ids)
error_message = "When `var.create_role_assignment_network_contributor` is `true`, you must set different subnet for different node pools, include default pool, otherwise you must set `var.create_role_assignment_network_contributor` to `false` and manage role assignments yourself."
}
}
}
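# Illustrative only: a minimal `var.node_pools` entry that these resources
# could consume. The authoritative schema lives in the module's variables;
# the attribute names below are assumptions inferred from the references above.
#
# node_pools = {
#   workload = {
#     name                  = "workload"
#     vm_size               = "Standard_D4s_v5"
#     node_count            = 3
#     auto_scaling_enabled  = false
#     create_before_destroy = true
#   }
# }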