
Commit 48acb4a

feat: Remove spegel layer configuration

1 parent fcec146 commit 48acb4a

File tree

6 files changed
+58 -33 lines changed

cluster_connect.sh.tpl

Lines changed: 0 additions & 29 deletions
This file was deleted.

node_group_advanced.tf

Lines changed: 11 additions & 4 deletions
@@ -69,7 +69,7 @@ resource "aws_iam_instance_profile" "quortex" {
 data "aws_ami" "eks_worker_image" {
   filter {
     name = "name"
-    values = ["amazon-eks-node-al2023-x86_64-standard-${local.kubernetes_worker_nodes_version}-v*"]
+    values = var.ami_al2023 ? ["amazon-eks-node-al2023-x86_64-standard-${local.kubernetes_worker_nodes_version}-v*"] : ["amazon-eks-node-${local.kubernetes_worker_nodes_version}-v*"]
   }
   most_recent = true
   owners      = ["self", "amazon"]
@@ -91,12 +91,19 @@ resource "aws_launch_template" "quortex_launch_tpl" {
 
   update_default_version = true
 
-  user_data = base64encode(
+  user_data = var.ami_al2023 ? base64encode(
     templatefile(
-      "${path.module}/userdata.sh.tpl",
+      "${path.module}/templates/al2023_user_data.tpl",
+      {
+        discard_unpacked_layers = var.discard_unpacked_layers
+      }
+    )
+  ) : base64encode(
+    templatefile(
+      "${path.module}/templates/al2_user_data.tpl",
       {
         warm_pool = lookup(each.value, "warm_pool_enabled", false)
-        script = templatefile("${path.module}/cluster_connect.sh.tpl",
+        script = templatefile("${path.module}/templates/cluster_connect.sh.tpl",
           {
             cluster_name      = aws_eks_cluster.quortex.name
             base64_cluster_ca = aws_eks_cluster.quortex.certificate_authority[0].data
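
In effect, var.ami_al2023 now selects both the AMI name filter and the user data template. A condensed sketch of the user_data decision (reformatted here for readability; the hunk above wraps each branch in its own base64encode call):

user_data = base64encode(var.ami_al2023
  ? templatefile("${path.module}/templates/al2023_user_data.tpl", {
      discard_unpacked_layers = var.discard_unpacked_layers # AL2023: rendered as a nodeadm NodeConfig manifest
    })
  : templatefile("${path.module}/templates/al2_user_data.tpl", {
      warm_pool = lookup(each.value, "warm_pool_enabled", false)
      script    = "..." # rendered templates/cluster_connect.sh.tpl, as in the hunk above
    })
)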

templates/al2023_user_data.tpl

Lines changed: 10 additions & 0 deletions
@@ -0,0 +1,10 @@
+%{ if discard_unpacked_layers == false }
+---
+apiVersion: node.eks.aws/v1alpha1
+kind: NodeConfig
+spec:
+  containerd:
+    config: |
+      [plugins."io.containerd.grpc.v1.cri".containerd]
+      discard_unpacked_layers = false
+%{ endif }
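
On AL2023, EKS worker nodes are configured via a node.eks.aws/v1alpha1 NodeConfig manifest (consumed by the AMI's nodeadm) rather than bootstrap.sh flags, so the containerd override is expressed as inline containerd configuration. When discard_unpacked_layers is true, the template renders to an empty string and the EKS default (discarding unpacked layers) stays in effect. A minimal rendering sketch, assuming the template path above (the output name is illustrative):

output "al2023_user_data" {
  value = templatefile("${path.module}/templates/al2023_user_data.tpl", {
    discard_unpacked_layers = false # false emits the NodeConfig document; true yields an empty string
  })
}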

templates/cluster_connect.sh.tpl

Lines changed: 31 additions & 0 deletions
@@ -0,0 +1,31 @@
+%{ if ami_al2023 == false }
+%{ if use_max_pods == false }
+# Calculate max pods
+KUBELET_CONFIG=/etc/kubernetes/kubelet/kubelet-config.json
+set +o pipefail
+CNI_VERSION=$(echo "${cni_version}" | sed 's/^v//')
+MAX_PODS=$(/etc/eks/max-pods-calculator.sh --instance-type-from-imds \
+  --cni-version $CNI_VERSION \
+  %{ if show_max_allowed }--show-max-allowed%{ endif } \
+  --cni-custom-networking-enabled)
+set -o pipefail
+if [[ -n "$MAX_PODS" ]]; then
+  echo "$(jq ".maxPods=$MAX_PODS" $KUBELET_CONFIG)" > $KUBELET_CONFIG
+else
+  echo "Not able to determine maxPods for instance. Not setting max pods for kubelet"
+fi
+%{ endif }
+
+/etc/eks/bootstrap.sh ${cluster_name} \
+  --use-max-pods ${use_max_pods} \
+  --kubelet-extra-args '--node-labels=${node_labels} --register-with-taints=${node_taints} ${kubelet_extra_args}' \
+  --b64-cluster-ca ${base64_cluster_ca} \
+  --apiserver-endpoint ${api_server_url}
+
+%{ if discard_unpacked_layers == false }
+# Retain unpacked layers. We avoid using the configuration merge capability of containerd due to a known bug.
+# For more details, refer to: https://github.com/containerd/containerd/issues/5837
+sed -i '/discard_unpacked_layers = true/s/true/false/' /etc/containerd/config.toml
+systemctl restart containerd
+%{ endif }
+%{ endif }
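
The body of this template only executes on AL2 (ami_al2023 == false), and every placeholder must be supplied by the calling templatefile. A hedged sketch of that variable map follows; the real wiring lives in node_group_advanced.tf outside the hunks shown, so each right-hand side below is an assumption:

script = templatefile("${path.module}/templates/cluster_connect.sh.tpl", {
  ami_al2023              = var.ami_al2023
  use_max_pods            = false                                 # assumed; false enables the max-pods calculator branch
  show_max_allowed        = var.node_use_max_pods_allowed         # assumed mapping
  cni_version             = "v1.18.1"                             # hypothetical value
  cluster_name            = aws_eks_cluster.quortex.name
  base64_cluster_ca       = aws_eks_cluster.quortex.certificate_authority[0].data
  api_server_url          = aws_eks_cluster.quortex.endpoint      # assumed attribute wiring
  node_labels             = "quortex.io/role=worker"              # hypothetical
  node_taints             = ""                                    # hypothetical
  kubelet_extra_args      = ""                                    # hypothetical
  discard_unpacked_layers = var.discard_unpacked_layers
})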

variables.tf

Lines changed: 6 additions & 0 deletions
@@ -216,6 +216,12 @@ variable "discard_unpacked_layers" {
   description = "Set to false to keep unpacked layers on the node after the image is pulled. By default, EKS will clean up the unpacked layers to save disk space."
 }
 
+variable "ami_al2023" {
+  description = "Whether to use Amazon Linux 2023 AMI for worker nodes (only applies to advanced_node_groups)."
+  type        = bool
+  default     = false
+}
+
 variable "node_use_max_pods_allowed" {
   type    = bool
   default = false
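
From a caller's perspective, opting into AL2023 workers while keeping unpacked layers on disk (which peer-to-peer registry mirrors such as spegel require, per the commit title) might look like the sketch below; the module source is hypothetical:

module "eks" {
  source = "./modules/eks-cluster" # hypothetical path

  ami_al2023              = true  # select the AL2023 AMI filter and nodeadm user data path
  discard_unpacked_layers = false # retain unpacked layers after image pull

  # ...remaining required module inputs
}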
