Skip to content

Commit 7b4f461

Browse files
committed
feat: refactor part of bash script to be included in TF
1 parent f46af25 commit 7b4f461

File tree

8 files changed

+296
-204
lines changed

8 files changed

+296
-204
lines changed

aws/README.md

Lines changed: 71 additions & 71 deletions
Large diffs are not rendered by default.

aws/build-an-deploy-aws.sh

Lines changed: 71 additions & 57 deletions
Original file line numberDiff line numberDiff line change
@@ -5,9 +5,10 @@ echo "Make sure you have updated your AWS credentials and your kubeconfig prior
55
echo "For this to work the AWS kubernetes cluster must have access to the same local registry / image cache which 'docker build ...' writes its image to"
66
echo "For example docker-desktop with its included k8s cluster"
77

8-
echo "NOTE: WE ARE WORKING HERE WITH A 5 LEGGED BALANCER on aWS which costs money by themselves!"
8+
echo "NOTE: WE ARE WORKING HERE WITH A 5 LEGGED LOAD BALANCER on AWS which costs money by itself!"
99

10-
echo "NOTE2: please replace balancer.cookie.cookieParserSecret witha value you fanchy and ensure you have TLS on (see outdated guides)."
10+
echo "NOTE 2: You can replace balancer.cookie.cookieParserSecret with a value you fancy."
11+
echo "Note 3: Ensure you turn TLS on :)."
1112

1213
echo "Usage: ./build-an-deploy-aws.sh "
1314

@@ -17,17 +18,10 @@ checkCommandsAvailable helm aws kubectl eksctl sed
1718
if test -n "${AWS_REGION-}"; then
1819
echo "AWS_REGION is set to <$AWS_REGION>"
1920
else
20-
AWS_REGION=eu-west-1
21+
export AWS_REGION=eu-west-1
2122
echo "AWS_REGION is not set or empty, defaulting to ${AWS_REGION}"
2223
fi
2324

24-
if test -n "${CLUSTERNAME-}"; then
25-
secho "CLUSTERNAME is set to <$CLUSTERNAME> which is different than the default. Please update the cluster-autoscaler-policy.json."
26-
else
27-
CLUSTERNAME=wrongsecrets-exercise-cluster
28-
echo "CLUSTERNAME is not set or empty, defaulting to ${CLUSTERNAME}"
29-
fi
30-
3125
echo "Checking for compatible shell"
3226
case "$SHELL" in
3327
*bash*)
@@ -45,12 +39,13 @@ esac
4539
ACCOUNT_ID=$(aws sts get-caller-identity | jq '.Account' -r)
4640
echo "ACCOUNT_ID=${ACCOUNT_ID}"
4741

42+
CLUSTERNAME="$(terraform output -raw cluster_name)"
43+
STATE_BUCKET="$(terraform output -raw state_bucket_name)"
44+
IRSA_ROLE_ARN="$(terraform output -raw irsa_role_arn)"
45+
EBS_ROLE_ARN="$(terraform output -raw ebs_role_arn)"
4846

4947
version="$(uuidgen)"
5048

51-
AWS_REGION="eu-west-1"
52-
53-
echo "Install autoscaler first!"
5449
echo "If the below output is different from what you expected: please hard-stop this script (check with aws sts get-caller-identity first)"
5550

5651
aws sts get-caller-identity
@@ -59,23 +54,23 @@ echo "Giving you 4 seconds before we add autoscaling"
5954

6055
sleep 4
6156

62-
echo "Installing policies and service accounts"
57+
# echo "Installing policies and service accounts"
6358

64-
aws iam create-policy \
65-
--policy-name AmazonEKSClusterAutoscalerPolicy \
66-
--policy-document file://cluster-autoscaler-policy.json
59+
# aws iam create-policy \
60+
# --policy-name AmazonEKSClusterAutoscalerPolicy \
61+
# --policy-document file://cluster-autoscaler-policy.json
6762

68-
echo "Installing iamserviceaccount"
63+
# echo "Installing iamserviceaccount"
6964

70-
eksctl create iamserviceaccount \
71-
--cluster=$CLUSTERNAME \
72-
--region=$AWS_REGION \
73-
--namespace=kube-system \
74-
--name=cluster-autoscaler \
75-
--role-name=AmazonEKSClusterAutoscalerRole \
76-
--attach-policy-arn=arn:aws:iam::${ACCOUNT_ID}:policy/AmazonEKSClusterAutoscalerPolicy \
77-
--override-existing-serviceaccounts \
78-
--approve
65+
# eksctl create iamserviceaccount \
66+
# --cluster=$CLUSTERNAME \
67+
# --region=$AWS_REGION \
68+
# --namespace=kube-system \
69+
# --name=cluster-autoscaler \
70+
# --role-name=AmazonEKSClusterAutoscalerRole \
71+
# --attach-policy-arn=arn:aws:iam::${ACCOUNT_ID}:policy/AmazonEKSClusterAutoscalerPolicy \
72+
# --override-existing-serviceaccounts \
73+
# --approve
7974

8075
echo "Deploying the k8s autoscaler for eks through kubectl"
8176

@@ -87,7 +82,7 @@ kubectl apply -f cluster-autoscaler-autodiscover.yaml
8782
echo "annotating service account for cluster-autoscaler"
8883
kubectl annotate serviceaccount cluster-autoscaler \
8984
-n kube-system \
90-
eks.amazonaws.com/role-arn=arn:aws:iam::${ACCOUNT_ID}:role/AmazonEKSClusterAutoscalerRole
85+
eks.amazonaws.com/role-arn=${CLUSTER_AUTOSCALER}
9186

9287
kubectl patch deployment cluster-autoscaler \
9388
-n kube-system \
@@ -123,43 +118,62 @@ kubectl apply -f https://github.com/kubernetes-sigs/metrics-server/releases/late
123118

124119
wait
125120

126-
DEFAULT_PASSWORD=thankyou
127-
#TODO: REWRITE ABOVE, REWRITE THE HARDCODED DEPLOYMENT VALS INTO VALUES AND OVERRIDE THEM HERE!
128-
echo "default password is ${DEFAULT_PASSWORD}"
121+
# if passed as arguments, use those
122+
# otherwise, create new default values
123+
124+
if [[ -z $APP_PASSWORD ]]; then
125+
echo "No app password passed, creating a new one"
126+
APP_PASSWORD="$(uuidgen)"
127+
else
128+
echo "App password already set"
129+
fi
130+
131+
if [[ -z $CREATE_TEAM_HMAC ]]; then
132+
CREATE_TEAM_HMAC="$(openssl rand -base64 24)"
133+
else
134+
echo "Create team HMAC already set"
135+
fi
136+
137+
if [[ -z $COOKIE_PARSER_SECRET ]]; then
138+
COOKIE_PARSER_SECRET="$(openssl rand -base64 24)"
139+
else
140+
echo "Cookie parser secret already set"
141+
fi
142+
143+
echo "App password is ${APP_PASSWORD}"
129144
helm upgrade --install mj ../helm/wrongsecrets-ctf-party \
130-
--set="imagePullPolicy=Always" \
131145
--set="balancer.env.K8S_ENV=aws" \
132-
--set="balancer.env.IRSA_ROLE=arn:aws:iam::${ACCOUNT_ID}:role/wrongsecrets-secret-manager" \
133-
--set="balancer.env.REACT_APP_ACCESS_PASSWORD=${DEFAULT_PASSWORD}" \
134-
--set="balancer.cookie.cookieParserSecret=thisisanewrandomvaluesowecanworkatit" \
135-
--set="balancer.repository=jeroenwillemsen/wrongsecrets-balancer" \
136-
--set="balancer.replicas=4" \
137-
--set="wrongsecretsCleanup.repository=jeroenwillemsen/wrongsecrets-ctf-cleaner" \
138-
--set="wrongsecrets.ctfKey=test" # this key isn't actually necessary in a setup with CTFd
146+
--set="balancer.env.IRSA_ROLE=${IRSA_ROLE_ARN}" \
147+
--set="balancer.env.REACT_APP_ACCESS_PASSWORD=${APP_PASSWORD}" \
148+
--set="balancer.env.REACT_APP_S3_BUCKET_URL=s3://${STATE_BUCKET}" \
149+
--set="balancer.env.REACT_APP_CREATE_TEAM_HMAC_KEY=${CREATE_TEAM_HMAC}" \
150+
--set="balancer.cookie.cookieParserSecret=${COOKIE_PARSER_SECRET}"
151+
152+
# echo "Installing EBS CSI driver"
153+
# eksctl create iamserviceaccount \
154+
# --name ebs-csi-controller-sa \
155+
# --namespace kube-system \
156+
# --cluster $CLUSTERNAME \
157+
# --attach-policy-arn arn:aws:iam::aws:policy/service-role/AmazonEBSCSIDriverPolicy \
158+
# --approve \
159+
# --role-only \
160+
# --role-name AmazonEKS_EBS_CSI_DriverRole
161+
# --region $AWS_REGION
162+
163+
# echo "managing EBS CSI Driver as a separate eks addon"
164+
# eksctl create addon --name aws-ebs-csi-driver \
165+
# --cluster $CLUSTERNAME \
166+
# --service-account-role-arn arn:aws:iam::${ACCOUNT_ID}:role/AmazonEKS_EBS_CSI_DriverRole \
167+
# --force \
168+
# --region $AWS_REGION
139169

140170
# Install CTFd
141171

142-
echo "Installing EBS CSI driver"
143-
eksctl create iamserviceaccount \
144-
--name ebs-csi-controller-sa \
145-
--namespace kube-system \
146-
--cluster $CLUSTERNAME \
147-
--attach-policy-arn arn:aws:iam::aws:policy/service-role/AmazonEBSCSIDriverPolicy \
148-
--approve \
149-
--role-only \
150-
--role-name AmazonEKS_EBS_CSI_DriverRole
151-
--region $AWS_REGION
152-
153-
echo "managing EBS CSI Driver as a separate eks addon"
154-
eksctl create addon --name aws-ebs-csi-driver \
155-
--cluster $CLUSTERNAME \
156-
--service-account-role-arn arn:aws:iam::${ACCOUNT_ID}:role/AmazonEKS_EBS_CSI_DriverRole \
157-
--force \
158-
--region $AWS_REGION
172+
echo "Installing CTFd"
159173

160174
export HELM_EXPERIMENTAL_OCI=1
161175
kubectl create namespace ctfd
162-
helm -n ctfd install ctfd oci://ghcr.io/bman46/ctfd/ctfd \
176+
helm upgrade --install ctfd -n ctfd oci://ghcr.io/bman46/ctfd/ctfd \
163177
--set="redis.auth.password=$(openssl rand -base64 24)" \
164178
--set="mariadb.auth.rootPassword=$(openssl rand -base64 24)" \
165179
--set="mariadb.auth.password=$(openssl rand -base64 24)" \

aws/cluster-autoscaler-policy.json

Lines changed: 33 additions & 33 deletions
Original file line numberDiff line numberDiff line change
@@ -1,36 +1,36 @@
11
{
2-
"Version": "2012-10-17",
3-
"Statement": [
4-
{
5-
"Sid": "VisualEditor0",
6-
"Effect": "Allow",
7-
"Action": [
8-
"autoscaling:SetDesiredCapacity",
9-
"autoscaling:TerminateInstanceInAutoScalingGroup",
10-
"ec2:DescribeImages",
11-
"ec2:GetInstanceTypesFromInstanceRequirements",
12-
"eks:DescribeNodegroup"
13-
],
14-
"Resource": "*",
15-
"Condition": {
16-
"StringEquals": {
17-
"aws:ResourceTag/k8s.io/cluster-autoscaler/wrongsecrets-exercise-cluster": "owned"
18-
}
19-
}
20-
},
21-
{
22-
"Sid": "VisualEditor1",
23-
"Effect": "Allow",
24-
"Action": [
25-
"autoscaling:DescribeAutoScalingGroups",
26-
"autoscaling:DescribeAutoScalingInstances",
27-
"autoscaling:DescribeLaunchConfigurations",
28-
"autoscaling:DescribeScalingActivities",
29-
"autoscaling:DescribeTags",
30-
"ec2:DescribeInstanceTypes",
31-
"ec2:DescribeLaunchTemplateVersions"
32-
],
33-
"Resource": "*"
2+
"Version": "2012-10-17",
3+
"Statement": [
4+
{
5+
"Sid": "VisualEditor0",
6+
"Effect": "Allow",
7+
"Action": [
8+
"autoscaling:SetDesiredCapacity",
9+
"autoscaling:TerminateInstanceInAutoScalingGroup",
10+
"ec2:DescribeImages",
11+
"ec2:GetInstanceTypesFromInstanceRequirements",
12+
"eks:DescribeNodegroup"
13+
],
14+
"Resource": "*",
15+
"Condition": {
16+
"StringEquals": {
17+
"aws:ResourceTag/k8s.io/cluster-autoscaler/wrongsecrets-exercise-cluster": "owned"
3418
}
35-
]
19+
}
20+
},
21+
{
22+
"Sid": "VisualEditor1",
23+
"Effect": "Allow",
24+
"Action": [
25+
"autoscaling:DescribeAutoScalingGroups",
26+
"autoscaling:DescribeAutoScalingInstances",
27+
"autoscaling:DescribeLaunchConfigurations",
28+
"autoscaling:DescribeScalingActivities",
29+
"autoscaling:DescribeTags",
30+
"ec2:DescribeInstanceTypes",
31+
"ec2:DescribeLaunchTemplateVersions"
32+
],
33+
"Resource": "*"
34+
}
35+
]
3636
}

aws/main.tf

Lines changed: 65 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -2,9 +2,9 @@ terraform {
22
# Set your region and bucket name (output from shared state) in the placeholder below
33
# Then uncomment and apply!
44
backend "s3" {
5-
region = "eu-west-1" # Change if desired
6-
bucket = "terraform-20230102231352749300000001" # Put your bucket name here
7-
key = "wrongsecrets/terraform.tfstate" # Change if desired
5+
region = "eu-west-1" # Change if desired
6+
bucket = "terraform-20230105182940038600000001" # Put your bucket name here
7+
key = "wrongsecrets/terraform.tfstate" # Change if desired
88
}
99
}
1010

@@ -70,6 +70,12 @@ module "eks" {
7070
vpc_id = module.vpc.vpc_id
7171
subnet_ids = module.vpc.private_subnets
7272

73+
cluster_addons = {
74+
aws-ebs-csi-driver = {
75+
most_recent = true
76+
}
77+
}
78+
7379

7480
cluster_endpoint_private_access = true
7581
cluster_endpoint_public_access = true
@@ -91,20 +97,20 @@ module "eks" {
9197
instance_types = ["t3a.medium"]
9298

9399
iam_role_additional_policies = {
94-
AmazonEKSWorkerNodePolicy: "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy",
95-
AmazonEKS_CNI_Policy: "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy",
96-
AmazonEC2ContainerRegistryReadOnly: "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly",
97-
AmazonSSMManagedInstanceCore: "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore",
98-
AmazonEKSVPCResourceController: "arn:aws:iam::aws:policy/AmazonEKSVPCResourceController"
100+
AmazonEKSWorkerNodePolicy : "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy",
101+
AmazonEKS_CNI_Policy : "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy",
102+
AmazonEC2ContainerRegistryReadOnly : "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly",
103+
AmazonSSMManagedInstanceCore : "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore",
104+
AmazonEKSVPCResourceController : "arn:aws:iam::aws:policy/AmazonEKSVPCResourceController"
99105
}
100106
}
101107

102108
eks_managed_node_groups = {
103109
bottlerocket_default = {
104110
use_custom_launch_template = false
105-
min_size = 3
106-
max_size = 50
107-
desired_size = 3
111+
min_size = 3
112+
max_size = 50
113+
desired_size = 3
108114

109115
capacity_type = "ON_DEMAND"
110116

@@ -131,3 +137,51 @@ module "eks" {
131137
"k8s.io/cluster-autoscaler/enabled" = true
132138
}
133139
}
140+
141+
# Cluster Autoscaler IRSA
142+
module "cluster_autoscaler_irsa_role" {
143+
source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks"
144+
version = "~> 5.9.0"
145+
146+
147+
role_name = "wrongsecrets-cluster-autoscaler"
148+
attach_cluster_autoscaler_policy = true
149+
cluster_autoscaler_cluster_ids = [module.eks.cluster_name]
150+
151+
oidc_providers = {
152+
cluster = {
153+
provider_arn = module.eks.oidc_provider_arn
154+
namespace_service_accounts = ["kube-system:cluster-autoscaler"]
155+
}
156+
}
157+
}
158+
159+
module "ebs_csi_irsa_role" {
160+
source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks"
161+
version = "~> 5.9.0"
162+
163+
role_name = "wrongsecrets-ebs-csi"
164+
attach_ebs_csi_policy = true
165+
166+
oidc_providers = {
167+
main = {
168+
provider_arn = module.eks.oidc_provider_arn
169+
namespace_service_accounts = ["kube-system:ebs-csi-controller-sa"]
170+
}
171+
}
172+
}
173+
174+
module "load_balancer_controller_irsa_role" {
175+
source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks"
176+
version = "~> 5.9.0"
177+
178+
role_name = "wrongsecrets-load-balancer-controller"
179+
attach_load_balancer_controller_policy = true
180+
181+
oidc_providers = {
182+
main = {
183+
provider_arn = module.eks.oidc_provider_arn
184+
namespace_service_accounts = ["kube-system:aws-load-balancer-controller"]
185+
}
186+
}
187+
}

0 commit comments

Comments
 (0)