6 files changed: 58 additions and 33 deletions.

One file in this changeset was deleted (its diff is not shown here).
@@ -69,7 +69,7 @@ resource "aws_iam_instance_profile" "quortex" {
 data "aws_ami" "eks_worker_image" {
   filter {
     name   = "name"
-    values = ["amazon-eks-node-al2023-x86_64-standard-${local.kubernetes_worker_nodes_version}-v*"]
+    values = var.ami_al2023 ? ["amazon-eks-node-al2023-x86_64-standard-${local.kubernetes_worker_nodes_version}-v*"] : ["amazon-eks-node-${local.kubernetes_worker_nodes_version}-v*"]
   }
   most_recent = true
   owners      = ["self", "amazon"]
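Depending on ami_al2023, the name filter above now matches one of two EKS-optimized AMI families. A sketch of the two resolved patterns, using 1.29 as a hypothetical stand-in for whatever local.kubernetes_worker_nodes_version evaluates to:

# var.ami_al2023 = true  -> Amazon Linux 2023 worker AMIs
values = ["amazon-eks-node-al2023-x86_64-standard-1.29-v*"]
# var.ami_al2023 = false -> Amazon Linux 2 worker AMIs (the default)
values = ["amazon-eks-node-1.29-v*"]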
@@ -91,12 +91,19 @@ resource "aws_launch_template" "quortex_launch_tpl" {
 
   update_default_version = true
 
-  user_data = base64encode(
+  user_data = var.ami_al2023 ? base64encode(
     templatefile(
-      "${path.module}/userdata.sh.tpl",
+      "${path.module}/templates/al2023_user_data.tpl",
+      {
+        discard_unpacked_layers = var.discard_unpacked_layers
+      }
+    )
+  ) : base64encode(
+    templatefile(
+      "${path.module}/templates/al2_user_data.tpl",
       {
         warm_pool = lookup(each.value, "warm_pool_enabled", false)
-        script    = templatefile("${path.module}/cluster_connect.sh.tpl",
+        script    = templatefile("${path.module}/templates/cluster_connect.sh.tpl",
           {
             cluster_name      = aws_eks_cluster.quortex.name
             base64_cluster_ca = aws_eks_cluster.quortex.certificate_authority[0].data
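The ternary above decides which template ends up base64-encoded into user_data. To preview what will actually reach AL2023 nodes, the same templatefile call can be wired to a throwaway output; a minimal sketch, where the output name is hypothetical and not part of this change:

# Hypothetical debug-only output: renders the AL2023 user data with layer
# discarding disabled, so the resulting NodeConfig can be inspected with
# `terraform output -raw al2023_user_data_preview` after an apply.
output "al2023_user_data_preview" {
  value = templatefile("${path.module}/templates/al2023_user_data.tpl", {
    discard_unpacked_layers = false
  })
}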
+%{ if discard_unpacked_layers == false }
+---
+apiVersion: node.eks.aws/v1alpha1
+kind: NodeConfig
+spec:
+  containerd:
+    config: |
+      [plugins."io.containerd.grpc.v1.cri".containerd]
+        discard_unpacked_layers = false
+%{ endif }
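This new file is the AL2023 user-data template referenced as templates/al2023_user_data.tpl in the launch template above. Because the whole document sits inside the %{ if } directive, it renders to an empty string when discard_unpacked_layers is true, so no user_data override is injected and the AMI keeps its stock containerd settings. When it is false, the NodeConfig above is consumed by the AL2023 bootstrap agent (nodeadm), which applies the inline containerd snippet declaratively; presumably that is why the AL2023 path needs no post-bootstrap patching, unlike the AL2 script further below.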
File renamed without changes.
+%{ if ami_al2023 == false }
+%{ if use_max_pods == false }
+# Calculate max pods
+KUBELET_CONFIG=/etc/kubernetes/kubelet/kubelet-config.json
+set +o pipefail
+CNI_VERSION=$(echo "${cni_version}" | sed 's/^v//')
+MAX_PODS=$(/etc/eks/max-pods-calculator.sh --instance-type-from-imds \
+  --cni-version $CNI_VERSION \
+  %{ if show_max_allowed }--show-max-allowed%{ endif } \
+  --cni-custom-networking-enabled)
+set -o pipefail
+if [[ -n "$MAX_PODS" ]]; then
+  echo "$(jq ".maxPods=$MAX_PODS" $KUBELET_CONFIG)" > $KUBELET_CONFIG
+else
+  echo "Not able to determine maxPods for instance. Not setting max pods for kubelet"
+fi
+%{ endif }
+
+/etc/eks/bootstrap.sh ${cluster_name} \
+  --use-max-pods ${use_max_pods} \
+  --kubelet-extra-args '--node-labels=${node_labels} --register-with-taints=${node_taints} ${kubelet_extra_args}' \
+  --b64-cluster-ca ${base64_cluster_ca} \
+  --apiserver-endpoint ${api_server_url}
+
+%{ if discard_unpacked_layers == false }
+# Retain unpacked layers. We avoid using the configuration merge capability of containerd due to a known bug.
+# For more details, refer to: https://github.com/containerd/containerd/issues/5837
+sed -i '/discard_unpacked_layers = true/s/true/false/' /etc/containerd/config.toml
+systemctl restart containerd
+%{ endif }
+%{ endif }
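For context, this new AL2 user-data template (referenced as templates/al2_user_data.tpl in the launch template above) flips the setting inside the AMI's existing containerd config rather than dropping in a merged config file, because of the containerd merge bug linked in the comment. Assuming the stock AL2 EKS AMI configuration, the effective edit is roughly:

# /etc/containerd/config.toml (relevant fragment), before the sed:
[plugins."io.containerd.grpc.v1.cri".containerd]
  discard_unpacked_layers = true
# after the sed, picked up by the containerd restart:
[plugins."io.containerd.grpc.v1.cri".containerd]
  discard_unpacked_layers = false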
@@ -216,6 +216,12 @@ variable "discard_unpacked_layers" {
   description = "Set to false to keep unpacked layers on the node after the image is pulled. By default, EKS will clean up the unpacked layers to save disk space."
 }
 
+variable "ami_al2023" {
+  description = "Whether to use Amazon Linux 2023 AMI for worker nodes (only applies to advanced_node_groups)."
+  type        = bool
+  default     = false
+}
+
 variable "node_use_max_pods_allowed" {
   type    = bool
   default = false
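Finally, a caller-side sketch of the new flag. The module source and all other required inputs are placeholders; only ami_al2023 and discard_unpacked_layers relate to this change:

module "eks" {
  source = "path/to/this/module" # placeholder

  # Opt in to the Amazon Linux 2023 worker AMI and keep unpacked image
  # layers on the nodes.
  ami_al2023              = true
  discard_unpacked_layers = false

  # ... all other required module inputs omitted ...
}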