
Commit b814a30: fix: move EKS to auto mode
1 parent 18fed55

13 files changed: +80 additions, −190 deletions

aws/irsa.tf

Lines changed: 6 additions & 3 deletions
@@ -72,6 +72,7 @@ resource "aws_iam_role" "user_role" {
   name = "cant-read-secrets"
 
   assume_role_policy = data.aws_iam_policy_document.user_assume_role.json
+  tags               = var.tags
 }
 
 data "aws_iam_policy_document" "user_assume_role" {
@@ -94,6 +95,7 @@ resource "aws_iam_policy" "secret_deny" {
   name_prefix = "secret-deny"
   description = "Deny secrets manager and SSM"
   policy      = data.aws_iam_policy_document.user_policy.json
+  tags        = var.tags
 }
 
 data "aws_iam_policy_document" "user_policy" {
@@ -123,15 +125,16 @@ data "aws_iam_policy_document" "user_policy" {
 }
 
 module "ebs_csi_irsa_role" {
-  source  = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks"
+  source  = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts"
   version = "~> 6.0"
-  role_name             = "ebs-csi"
+  name                  = "ebs-csi"
   attach_ebs_csi_policy = true
 
   oidc_providers = {
     ex = {
      provider_arn               = module.eks.oidc_provider_arn
-      namespace_service_accounts = ["consul:server", "kube-system:ebs-csi-controller-sa"]
+      namespace_service_accounts = ["kube-system:ebs-csi-controller-sa"]
    }
  }
+  tags = var.tags
 }
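The renamed module still wires up IRSA the same way: it creates an IAM role that trusts the cluster's OIDC provider for the listed service account. For orientation, a minimal sketch of how that role is consumed on the Kubernetes side; the role ARN uses a placeholder account ID (the real ARN is produced by Terraform):

# Hypothetical view of the annotated service account that IRSA expects;
# the account ID in the role ARN below is a placeholder.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: ebs-csi-controller-sa
  namespace: kube-system
  annotations:
    eks.amazonaws.com/role-arn: arn:aws:iam::111122223333:role/ebs-csi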

aws/k8s-aws-alb-script-cleanup.sh

Lines changed: 0 additions & 20 deletions
@@ -28,24 +28,4 @@ echo "cleanup k8s ingress and service. This may take a while"
 kubectl delete service secret-challenge
 kubectl delete ingress wrongsecrets
 
-echo "Cleanup helm chart"
-helm uninstall aws-load-balancer-controller \
-  -n kube-system
-
-echo "Cleanup k8s ALB"
-kubectl delete -k "github.com/aws/eks-charts/stable/aws-load-balancer-controller/crds?ref=master"
-
-echo "Cleanup iam serviceaccount and policy"
-eksctl delete iamserviceaccount \
-  --cluster $CLUSTERNAME \
-  --name aws-load-balancer-controller \
-  --namespace kube-system \
-  --region $AWS_REGION
-
-sleep 5 # Prevents race condition - command below may error out because it's still 'attached'
-
-aws iam delete-policy \
-  --policy-arn arn:aws:iam::${ACCOUNT_ID}:policy/AWSLoadBalancerControllerIAMPolicy
-
-echo "Wait for 10 seconds to let the AWS resources be cleaned up"
 sleep 10

aws/k8s-aws-alb-script.sh

Lines changed: 4 additions & 82 deletions
@@ -5,89 +5,11 @@
 
 source ../scripts/check-available-commands.sh
 
-checkCommandsAvailable aws cat docker eksctl grep helm jq kubectl sed terraform vault
+checkCommandsAvailable kubectl
 
-if test -n "${AWS_REGION-}"; then
-  echo "AWS_REGION is set to <$AWS_REGION>"
-else
-  AWS_REGION=eu-west-1
-  echo "AWS_REGION is not set or empty, defaulting to ${AWS_REGION}"
-fi
-
-if test -n "${CLUSTERNAME-}"; then
-  echo "CLUSTERNAME is set to <$CLUSTERNAME>"
-else
-  CLUSTERNAME=wrongsecrets-exercise-cluster
-  echo "CLUSTERNAME is not set or empty, defaulting to ${CLUSTERNAME}"
-fi
-
-ACCOUNT_ID=$(aws sts get-caller-identity | jq '.Account' -r)
-echo "ACCOUNT_ID=${ACCOUNT_ID}"
-
-LBC_VERSION="v2.13.4"
-echo "LBC_VERSION=$LBC_VERSION"
-
-# echo "executing eksctl utils associate-iam-oidc-provider"
-# eksctl utils associate-iam-oidc-provider \
-#   --region ${AWS_REGION} \
-#   --cluster ${CLUSTERNAME} \
-#   --approve
-
-echo "creating iam policy"
-curl -o iam_policy.json https://raw.githubusercontent.com/kubernetes-sigs/aws-load-balancer-controller/"${LBC_VERSION}"/docs/install/iam_policy.json
-aws iam create-policy \
-  --policy-name AWSLoadBalancerControllerIAMPolicy \
-  --policy-document file://iam_policy.json
-
-echo "creating iam service account for cluster ${CLUSTERNAME}"
-eksctl create iamserviceaccount \
-  --cluster $CLUSTERNAME \
-  --namespace kube-system \
-  --name aws-load-balancer-controller \
-  --attach-policy-arn arn:aws:iam::${ACCOUNT_ID}:policy/AWSLoadBalancerControllerIAMPolicy \
-  --override-existing-serviceaccounts \
-  --region $AWS_REGION \
-  --approve
-
-echo "setting up kubectl"
-
-aws eks update-kubeconfig --region $AWS_REGION --name $CLUSTERNAME --kubeconfig ~/.kube/wrongsecrets
-
-export KUBECONFIG=~/.kube/wrongsecrets
-
-echo "applying aws-lbc with kubectl"
-
-kubectl apply -k "github.com/aws/eks-charts/stable/aws-load-balancer-controller/crds?ref=master"
-
-kubectl get crd
-
-echo "do helm eks application"
-helm repo add eks https://aws.github.io/eks-charts
-helm repo update eks
-
-echo "upgrade alb controller with helm"
-helm upgrade -i aws-load-balancer-controller \
-  eks/aws-load-balancer-controller \
-  -n kube-system \
-  --set clusterName=${CLUSTERNAME} \
-  --set serviceAccount.create=false \
-  --set serviceAccount.name=aws-load-balancer-controller \
-  --set image.tag="${LBC_VERSION}" \
-  --set region=${AWS_REGION} \
-  --set image.repository=602401143452.dkr.ecr.${AWS_REGION}.amazonaws.com/amazon/aws-load-balancer-controller
-# You may need to modify the account ID above if you're operating in af-south-1, ap-east-1, ap-southeast-3, cn-north and cn-northwest, eu-south-1, me-south-1, or the govcloud.
-# See the full list of accounts per regions here: https://docs.aws.amazon.com/eks/latest/userguide/add-ons-images.html
-
-echo "wait with rollout for 10 s"
-sleep 10
-
-echo "rollout status deployment"
-kubectl -n kube-system rollout status deployment aws-load-balancer-controller
-
-echo "wait after rollout for 10 s"
-sleep 10
-
-EKS_CLUSTER_VERSION=$(aws eks describe-cluster --name $CLUSTERNAME --region $AWS_REGION --query cluster.version --output text)
+echo "set up ingress class"
+kubectl apply -f ./k8s/ingress-class-params.yaml
+kubectl apply -f ./k8s/ingress-class.yaml
 
 echo "apply -f k8s/secret-challenge-vault-service.yml in 10 s"
 sleep 10

aws/k8s-vault-aws-start.sh

Lines changed: 16 additions & 13 deletions
@@ -17,7 +17,7 @@ fi
 if test -n "${CLUSTERNAME-}"; then
   echo "CLUSTERNAME is set to <$CLUSTERNAME>"
 else
-  CLUSTERNAME=wrongsecrets-exercise-cluster
+  CLUSTERNAME=wrongsecrets
   echo "CLUSTERNAME is not set or empty, defaulting to ${CLUSTERNAME}"
 fi
 
@@ -61,18 +61,21 @@ else
   kubectl apply -f ../k8s/challenge33.yml
 fi
 
-helm list -n | grep 'aws-ebs-csi-driver' &> /dev/null
-if [ $? == 0 ]; then
-  echo "AWS EBS CSI driver is already installed"
-else
-  echo "Installing AWS EBS CSI driver"
-  helm repo add aws-ebs-csi-driver https://kubernetes-sigs.github.io/aws-ebs-csi-driver
-  helm repo update
-  helm upgrade --install aws-ebs-csi-driver --version 2.32.0 \
-    --namespace kube-system \
-    aws-ebs-csi-driver/aws-ebs-csi-driver \
-    --values ./k8s/ebs-csi-driver-values.yaml
-fi
+#helm list -n | grep 'aws-ebs-csi-driver' &>/dev/null
+#if [ $? == 0 ]; then
+#  echo "AWS EBS CSI driver is already installed"
+#else
+#  echo "Installing AWS EBS CSI driver"
+#  helm repo add aws-ebs-csi-driver https://kubernetes-sigs.github.io/aws-ebs-csi-driver
+#  helm repo update
+#  helm upgrade --install aws-ebs-csi-driver --version 2.32.0 \
+#    --namespace kube-system \
+#    aws-ebs-csi-driver/aws-ebs-csi-driver \
+#    --values ./k8s/ebs-csi-driver-values.yaml
+#fi
+
+echo "Setting up gp3 storage class..."
+kubectl apply -f ./k8s/ebs-csi-gp3.yaml
 
 source ../scripts/install-vault.sh
 

aws/k8s/ebs-csi-driver-values.yaml

Lines changed: 0 additions & 11 deletions
This file was deleted.

aws/k8s/ebs-csi-gp3.yaml

Lines changed: 21 additions & 0 deletions
@@ -0,0 +1,21 @@
+kind: StorageClass
+apiVersion: storage.k8s.io/v1
+metadata:
+  name: gp3
+  annotations:
+    storageclass.kubernetes.io/is-default-class: "true"
+allowedTopologies:
+  - matchLabelExpressions:
+      - key: eks.amazonaws.com/compute-type
+        values:
+          - auto
+provisioner: ebs.csi.eks.amazonaws.com
+volumeBindingMode: WaitForFirstConsumer
+reclaimPolicy: Delete
+allowVolumeExpansion: true
+# The following parameters are specific to the EBS CSI driver.
+parameters:
+  type: gp3
+  encrypted: "true"
+  tagSpecification_1: Application=wrongsecrets
+
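Since the is-default-class annotation makes gp3 the cluster default and volumeBindingMode is WaitForFirstConsumer, a claim without an explicit storageClassName gets an encrypted gp3 volume provisioned once the first pod that uses it is scheduled. A minimal sketch of such a claim; the name and size are illustrative and not part of this commit:

# Hypothetical PVC exercising the new default class; name and size are illustrative.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: example-data
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 8Gi
  # storageClassName: gp3  # optional: the default-class annotation already selects gp3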
aws/k8s/ingress-class-params.yaml

Lines changed: 6 additions & 0 deletions
@@ -0,0 +1,6 @@
+apiVersion: eks.amazonaws.com/v1
+kind: IngressClassParams
+metadata:
+  name: alb
+spec:
+  scheme: internet-facing

aws/k8s/ingress-class.yaml

Lines changed: 12 additions & 0 deletions
@@ -0,0 +1,12 @@
+apiVersion: networking.k8s.io/v1
+kind: IngressClass
+metadata:
+  name: alb
+  annotations:
+    ingressclass.kubernetes.io/is-default-class: "true"
+spec:
+  controller: eks.amazonaws.com/alb
+  parameters:
+    apiGroup: eks.amazonaws.com
+    kind: IngressClassParams
+    name: alb
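With controller set to eks.amazonaws.com/alb, Ingress objects on this class are reconciled by the load balancer controller built into EKS Auto Mode instead of the self-managed aws-load-balancer-controller the scripts above no longer install, and the linked IngressClassParams make the resulting ALB internet-facing. A minimal sketch of an Ingress resolving to this class; the service name is borrowed from the cleanup script and the port is an assumption:

# Hypothetical Ingress; only the "alb" class and its params come from this commit.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: wrongsecrets
spec:
  ingressClassName: alb  # optional here: alb is annotated as the default IngressClass
  rules:
    - http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: secret-challenge  # from the cleanup script; port is assumed
                port:
                  number: 80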

aws/main.tf

Lines changed: 12 additions & 48 deletions
@@ -67,65 +67,29 @@ module "eks" {
   source  = "terraform-aws-modules/eks/aws"
   version = "21.3.1"
 
-  cluster_name    = var.cluster_name
-  cluster_version = var.cluster_version
+  name               = var.cluster_name
+  kubernetes_version = var.cluster_version
 
   vpc_id     = module.vpc.vpc_id
   subnet_ids = module.vpc.private_subnets
 
 
-  cluster_endpoint_private_access = true
-  cluster_endpoint_public_access  = true
+  endpoint_private_access = true
+  endpoint_public_access  = true
 
-  cluster_endpoint_public_access_cidrs = ["${data.http.ip.response_body}/32"]
+  endpoint_public_access_cidrs = ["${data.http.ip.response_body}/32"]
 
-  enable_irsa = true
+  #create_auto_mode_iam_resources = true
 
   enable_cluster_creator_admin_permissions = true
 
-  eks_managed_node_group_defaults = {
-    disk_size       = 50
-    disk_type       = "gp3"
-    disk_throughput = 150
-    disk_iops       = 3000
-    instance_types  = ["t3.large"]
-
-    iam_role_additional_policies = {
-      AmazonEKSWorkerNodePolicy : "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy",
-      AmazonEKS_CNI_Policy : "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy",
-      AmazonEC2ContainerRegistryReadOnly : "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly",
-      AmazonSSMManagedInstanceCore : "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore",
-      AmazonEKSVPCResourceController : "arn:aws:iam::aws:policy/AmazonEKSVPCResourceController",
-      AmazonEBSCSIDriverPolicy : "arn:aws:iam::aws:policy/service-role/AmazonEBSCSIDriverPolicy"
-    }
+  upgrade_policy = {
+    support_type = "STANDARD"
   }
 
-  eks_managed_node_groups = {
-    bottlerocket_default = {
-      use_custom_launch_template = false
-      min_size                   = 1
-      max_size                   = 3
-      desired_size               = 1
-      capacity_type              = "SPOT"
-
-      ami_type = "BOTTLEROCKET_x86_64"
-      platform = "bottlerocket"
-    }
-  }
-
-  node_security_group_additional_rules = {
-    aws_lb_controller_webhook = {
-      description                   = "Cluster API to AWS LB Controller webhook"
-      protocol                      = "all"
-      from_port                     = 9443
-      to_port                       = 9443
-      type                          = "ingress"
-      source_cluster_security_group = true
-    }
-  }
-
-  tags = {
-    Environment = "test"
-    Application = "wrongsecrets"
+  compute_config = {
+    enabled    = true
+    node_pools = ["general-purpose", "system"]
   }
+  tags = var.tags
 }

aws/secrets.tf

Lines changed: 1 addition & 0 deletions
@@ -49,6 +49,7 @@ POLICY
 resource "aws_secretsmanager_secret" "secret_2" {
   name                    = "wrongsecret-2"
   recovery_window_in_days = 0
+  tags                    = var.tags
 }
 
 resource "aws_secretsmanager_secret_policy" "policy_2" {
