Skip to content

Commit 0ad0655

Browse files
committed
refactor: centralise k8s context and cluster naming logic
Introduces the `get_cluster_context` and `get_cluster_name` helper functions to standardise how context and cluster names are derived from environment variables.

- Uses K8S_CONTEXT_PREFIX (default: `kind-`) and K8S_BASE_NAME (default: `k8s-`).
- Ensures consistent string concatenation across the scripts.

Signed-off-by: Gabriele Bartolini <gabriele.bartolini@enterprisedb.com>
1 parent f365164 commit 0ad0655

File tree

7 files changed

+73
-45
lines changed

7 files changed

+73
-45
lines changed

demo/setup.sh

Lines changed: 19 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -26,6 +26,10 @@
2626
set -eux
2727

2828
git_repo_root=$(git rev-parse --show-toplevel)
29+
30+
# Source the common setup script
31+
source ${git_repo_root}/scripts/common.sh
32+
2933
kube_config_path=${git_repo_root}/k8s/kube-config.yaml
3034
demo_yaml_path=${git_repo_root}/demo/yaml
3135

@@ -61,63 +65,65 @@ export KUBECONFIG=${kube_config_path}
6165
# Begin deployment, one region at a time
6266
for region in eu us; do
6367

68+
CONTEXT_NAME=$(get_cluster_context "${region}")
69+
6470
if [ $trunk -eq 1 ]
6571
then
6672
# Deploy CloudNativePG operator (trunk - main branch)
6773
curl -sSfL \
6874
https://raw.githubusercontent.com/cloudnative-pg/artifacts/main/manifests/operator-manifest.yaml | \
69-
kubectl --context kind-k8s-${region} apply -f - --server-side
75+
kubectl --context ${CONTEXT_NAME} apply -f - --server-side
7076
else
7177
# Deploy CloudNativePG operator (latest version, through the plugin)
7278
kubectl cnpg install generate --control-plane | \
73-
kubectl --context kind-k8s-${region} apply -f - --server-side
79+
kubectl --context ${CONTEXT_NAME} apply -f - --server-side
7480
fi
7581

7682
# Wait for CNPG deployment to complete
77-
kubectl --context kind-k8s-${region} rollout status deployment \
83+
kubectl --context ${CONTEXT_NAME} rollout status deployment \
7884
-n cnpg-system cnpg-controller-manager
7985

8086
# Deploy cert-manager
81-
kubectl apply --context kind-k8s-${region} -f \
87+
kubectl apply --context ${CONTEXT_NAME} -f \
8288
https://github.com/cert-manager/cert-manager/releases/latest/download/cert-manager.yaml
8389

8490
# Wait for cert-manager deployment to complete
85-
kubectl rollout --context kind-k8s-${region} status deployment \
91+
kubectl rollout --context ${CONTEXT_NAME} status deployment \
8692
-n cert-manager
87-
cmctl check api --wait=2m --context kind-k8s-${region}
93+
cmctl check api --wait=2m --context ${CONTEXT_NAME}
8894

8995
if [ $trunk -eq 1 ]
9096
then
9197
# Deploy Barman Cloud Plugin (trunk)
92-
kubectl apply --context kind-k8s-${region} -f \
98+
kubectl apply --context ${CONTEXT_NAME} -f \
9399
https://raw.githubusercontent.com/cloudnative-pg/plugin-barman-cloud/refs/heads/main/manifest.yaml
94100
else
95101
# Deploy Barman Cloud Plugin (latest stable)
96-
kubectl apply --context kind-k8s-${region} -f \
102+
kubectl apply --context ${CONTEXT_NAME} -f \
97103
https://github.com/cloudnative-pg/plugin-barman-cloud/releases/latest/download/manifest.yaml
98104
fi
99105

100106
# Wait for Barman Cloud Plugin deployment to complete
101-
kubectl rollout --context kind-k8s-${region} status deployment \
107+
kubectl rollout --context ${CONTEXT_NAME} status deployment \
102108
-n cnpg-system barman-cloud
103109

104110
# Create Barman object stores
105-
kubectl apply --context kind-k8s-${region} -f \
111+
kubectl apply --context ${CONTEXT_NAME} -f \
106112
${demo_yaml_path}/object-stores
107113

108114
# Create the Postgres cluster
109-
kubectl apply --context kind-k8s-${region} -f \
115+
kubectl apply --context ${CONTEXT_NAME} -f \
110116
${demo_yaml_path}/${region}/pg-${region}${legacy}.yaml
111117

112118
# Create the PodMonitor if Prometheus has been installed
113119
if check_crd_existence podmonitors.monitoring.coreos.com
114120
then
115-
kubectl apply --context kind-k8s-${region} -f \
121+
kubectl apply --context ${CONTEXT_NAME} -f \
116122
${demo_yaml_path}/${region}/pg-${region}-podmonitor.yaml
117123
fi
118124

119125
# Wait for the cluster to be ready
120-
kubectl wait --context kind-k8s-${region} \
126+
kubectl wait --context ${CONTEXT_NAME} \
121127
--timeout 30m \
122128
--for=condition=Ready cluster/pg-${region}
123129

demo/teardown.sh

Lines changed: 11 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -24,6 +24,10 @@
2424
set -ux
2525

2626
git_repo_root=$(git rev-parse --show-toplevel)
27+
28+
# Source the common setup script
29+
source ${git_repo_root}/scripts/common.sh
30+
2731
kube_config_path=${git_repo_root}/k8s/kube-config.yaml
2832
demo_yaml_path=${git_repo_root}/demo/yaml
2933

@@ -34,25 +38,27 @@ export KUBECONFIG=${kube_config_path}
3438
# Delete deployment, one region at a time
3539
for region in eu us; do
3640

41+
CONTEXT_NAME=$(get_cluster_context "${region}")
42+
3743
# Delete the Postgres cluster
38-
kubectl delete --context kind-k8s-${region} -f \
44+
kubectl delete --context ${CONTEXT_NAME} -f \
3945
${demo_yaml_path}/${region}
4046

4147
# Delete Barman object stores
42-
kubectl delete --context kind-k8s-${region} -f \
48+
kubectl delete --context ${CONTEXT_NAME} -f \
4349
${demo_yaml_path}/object-stores
4450

4551
# Delete Barman Cloud Plugin
46-
kubectl delete --context kind-k8s-${region} -f \
52+
kubectl delete --context ${CONTEXT_NAME} -f \
4753
https://github.com/cloudnative-pg/plugin-barman-cloud/releases/latest/download/manifest.yaml
4854

4955
# Delete cert-manager
50-
kubectl delete --context kind-k8s-${region} -f \
56+
kubectl delete --context ${CONTEXT_NAME} -f \
5157
https://github.com/cert-manager/cert-manager/releases/latest/download/cert-manager.yaml
5258

5359
# Delete CNPG operator
5460
kubectl cnpg install generate --control-plane | \
55-
kubectl --context kind-k8s-${region} delete -f -
61+
kubectl --context ${CONTEXT_NAME} delete -f -
5662

5763
# Remove backup data
5864
docker exec minio-${region} rm -rf /data/backups/pg-${region}

monitoring/setup.sh

Lines changed: 11 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -35,7 +35,7 @@ if [ $# -gt 0 ]; then
3535
else
3636
echo "🔎 Auto-detecting all active playground regions for monitoring setup..."
3737
# The '|| true' prevents the script from exiting if grep finds no matches.
38-
REGIONS=($(kind get clusters | grep "^${K8S_BASE_NAME}-" | sed "s/^${K8S_BASE_NAME}-//" || true))
38+
REGIONS=($(kind get clusters | grep "^${K8S_BASE_NAME}" | sed "s/^${K8S_BASE_NAME}//" || true))
3939
fi
4040

4141
# Add a target port for the port-forward; the port will be incremented by 1 for each region
@@ -46,17 +46,18 @@ for region in "${REGIONS[@]}"; do
4646
echo " 🔥 Provisioning Prometheus resources for region: ${region}"
4747
echo "-------------------------------------------------------------"
4848

49-
K8S_CLUSTER_NAME="${K8S_BASE_NAME}-${region}"
49+
K8S_CLUSTER_NAME=$(get_cluster_name "${region}")
50+
CONTEXT_NAME=$(get_cluster_context "${region}")
5051

5152
# Deploy the Prometheus operator in the playground Kubernetes clusters
52-
kubectl --context kind-${K8S_CLUSTER_NAME} create ns prometheus-operator || true
53+
kubectl --context ${CONTEXT_NAME} create ns prometheus-operator || true
5354
kubectl kustomize ${GIT_REPO_ROOT}/monitoring/prometheus-operator | \
54-
kubectl --context kind-${K8S_CLUSTER_NAME} apply --force-conflicts --server-side -f -
55+
kubectl --context ${CONTEXT_NAME} apply --force-conflicts --server-side -f -
5556

5657
# We make sure that monitoring workloads are deployed in the infrastructure node.
5758
kubectl kustomize ${GIT_REPO_ROOT}/monitoring/prometheus-instance | \
58-
kubectl --context=kind-${K8S_CLUSTER_NAME} apply --force-conflicts --server-side -f -
59-
kubectl --context=kind-${K8S_CLUSTER_NAME} -n prometheus-operator \
59+
kubectl --context=${CONTEXT_NAME} apply --force-conflicts --server-side -f -
60+
kubectl --context=${CONTEXT_NAME} -n prometheus-operator \
6061
patch deployment prometheus-operator \
6162
--type='merge' \
6263
--patch='{"spec":{"template":{"spec":{"tolerations":[{"key":"node-role.kubernetes.io/infra","operator":"Exists","effect":"NoSchedule"}],"nodeSelector":{"node-role.kubernetes.io/infra":""}}}}}'
@@ -66,16 +67,16 @@ for region in "${REGIONS[@]}"; do
6667
echo "-------------------------------------------------------------"
6768

6869
# Deploying Grafana operator
69-
kubectl --context kind-${K8S_CLUSTER_NAME} apply --force-conflicts --server-side \
70+
kubectl --context ${CONTEXT_NAME} apply --force-conflicts --server-side \
7071
-f https://github.com/grafana/grafana-operator/releases/latest/download/kustomize-cluster_scoped.yaml
71-
kubectl --context kind-${K8S_CLUSTER_NAME} -n grafana \
72+
kubectl --context ${CONTEXT_NAME} -n grafana \
7273
patch deployment grafana-operator-controller-manager \
7374
--type='merge' \
7475
--patch='{"spec":{"template":{"spec":{"tolerations":[{"key":"node-role.kubernetes.io/infra","operator":"Exists","effect":"NoSchedule"}],"nodeSelector":{"node-role.kubernetes.io/infra":""}}}}}'
7576

7677
# Creating Grafana instance and dashboards
7778
kubectl kustomize ${GIT_REPO_ROOT}/monitoring/grafana/ | \
78-
kubectl --context kind-${K8S_CLUSTER_NAME} apply -f -
79+
kubectl --context ${CONTEXT_NAME} apply -f -
7980

8081
# Restart the operator
8182
if kubectl get ns cnpg-system &> /dev/null
@@ -88,7 +89,7 @@ fi
8889
echo " ⏩ To forward the Grafana service for region: ${region} to your localhost"
8990
echo " Wait for the Grafana service to be created and then forward the service"
9091
echo ""
91-
echo " kubectl port-forward service/grafana-service ${port}:3000 -n grafana --context kind-k8s-${region}"
92+
echo " kubectl port-forward service/grafana-service ${port}:3000 -n grafana --context ${CONTEXT_NAME}"
9293
echo ""
9394
echo " You can then connect to the Grafana GUI using"
9495
echo " http://localhost:${port}"

scripts/common.sh

Lines changed: 14 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,8 @@ set -euo pipefail
2323

2424
# --- Common Configuration ---
2525
# Kind base name for clusters
26-
K8S_BASE_NAME=${K8S_NAME:-k8s}
26+
K8S_CONTEXT_PREFIX=${K8S_CONTEXT_PREFIX-kind-}
27+
K8S_BASE_NAME=${K8S_NAME-k8s-}
2728

2829
# MinIO Configuration
2930
MINIO_IMAGE="${MINIO_IMAGE:-quay.io/minio/minio:RELEASE.2025-09-07T16-13-09Z}"
@@ -59,3 +60,15 @@ fi
5960
# Determine project root and kubeconfig path
6061
GIT_REPO_ROOT=$(git rev-parse --show-toplevel)
6162
KUBE_CONFIG_PATH="${GIT_REPO_ROOT}/k8s/kube-config.yaml"
63+
64+
# Helper function that builds the name of the cluster in a standard way given the region
65+
get_cluster_name() {
66+
local region="$1"
67+
echo "${K8S_BASE_NAME}${region}"
68+
}
69+
70+
# Helper function that builds the name of the context in a standard way given the region
71+
get_cluster_context() {
72+
local region="$1"
73+
echo "${K8S_CONTEXT_PREFIX}${K8S_BASE_NAME}${region}"
74+
}

scripts/info.sh

Lines changed: 11 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -31,10 +31,10 @@ export KUBECONFIG="${KUBE_CONFIG_PATH}"
3131

3232
# --- Auto-detect Regions ---
3333
echo "🔎 Detecting active playground clusters..."
34-
REGIONS=($(kind get clusters | grep "^${K8S_BASE_NAME}-" | sed "s/^${K8S_BASE_NAME}-//" || true))
34+
REGIONS=($(kind get clusters | grep "^${K8S_BASE_NAME}" | sed "s/^${K8S_BASE_NAME}//" || true))
3535

3636
if [ ${#REGIONS[@]} -eq 0 ]; then
37-
echo "🤷 No active playground clusters found with the prefix '${K8S_BASE_NAME}-'."
37+
echo "🤷 No active playground clusters found with the prefix '${K8S_BASE_NAME}'."
3838
exit 0
3939
fi
4040
echo "✅ Found regions: ${REGIONS[*]}"
@@ -50,28 +50,30 @@ echo "export KUBECONFIG=${KUBE_CONFIG_PATH}"
5050
echo
5151
echo "Available cluster contexts:"
5252
for region in "${REGIONS[@]}"; do
53-
echo " • kind-${K8S_BASE_NAME}-${region}"
53+
CONTEXT_NAME=$(get_cluster_context "${region}")
54+
echo "${CONTEXT_NAME}"
5455
done
5556
echo
5657
echo "To switch to a specific cluster (e.g., the '${REGIONS[0]}' region), use:"
57-
echo "kubectl config use-context kind-${K8S_BASE_NAME}-${REGIONS[0]}"
58+
echo "kubectl config use-context $(get_cluster_context ${REGIONS[0]})"
5859
echo
5960

6061
# --- Main Info Loop ---
6162
echo "--------------------------------------------------"
6263
echo "ℹ️ Cluster Information"
6364
echo "--------------------------------------------------"
6465
for region in "${REGIONS[@]}"; do
65-
CONTEXT="kind-${K8S_BASE_NAME}-${region}"
66+
CLUSTER_NAME=$(get_cluster_name "${region}")
67+
CONTEXT_NAME=$(get_cluster_context "${region}")
6668
echo
67-
echo "🔷 Cluster: ${CONTEXT}"
69+
echo "🔷 Cluster: ${CLUSTER_NAME}"
6870
echo "==================================="
6971
echo "🔹 Version:"
70-
kubectl --context "${CONTEXT}" version
72+
kubectl --context "${CONTEXT_NAME}" version
7173
echo
7274
echo "🔹 Nodes:"
73-
kubectl --context "${CONTEXT}" get nodes -o wide
75+
kubectl --context "${CONTEXT_NAME}" get nodes -o wide
7476
echo
7577
echo "🔹 Secrets:"
76-
kubectl --context "${CONTEXT}" get secrets
78+
kubectl --context "${CONTEXT_NAME}" get secrets
7779
done

scripts/setup.sh

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -37,14 +37,14 @@ echo "✅ Prerequisites met. Using '$CONTAINER_PROVIDER' as the container provid
3737
# --- Pre-flight Check ---
3838
echo "🔎 Verifying that no existing playground clusters are running..."
3939
# The '|| true' prevents the script from exiting if grep finds no matches.
40-
existing_count=$(kind get clusters | grep -c "^${K8S_BASE_NAME}-" || true)
40+
existing_count=$(kind get clusters | grep -c "^${K8S_BASE_NAME}" || true)
4141

4242
if [ "${existing_count}" -gt 0 ]; then
4343
echo "❌ Error: Found ${existing_count} existing playground cluster(s)."
4444
echo "Please run './scripts/teardown.sh' to remove the existing environment before running setup."
4545
echo
4646
echo "Found clusters:"
47-
kind get clusters | grep "^${K8S_BASE_NAME}-"
47+
kind get clusters | grep "^${K8S_BASE_NAME}"
4848
exit 1
4949
fi
5050

@@ -73,7 +73,7 @@ for region in "${REGIONS[@]}"; do
7373
echo "🚀 Provisioning resources for region: ${region}"
7474
echo "--------------------------------------------------"
7575

76-
K8S_CLUSTER_NAME="${K8S_BASE_NAME}-${region}"
76+
K8S_CLUSTER_NAME=$(get_cluster_name "${region}")
7777
MINIO_CONTAINER_NAME="${MINIO_BASE_NAME}-${region}"
7878

7979
echo "📦 Creating MinIO container '${MINIO_CONTAINER_NAME}' on host port ${current_minio_port}..."
@@ -110,7 +110,7 @@ echo "--------------------------------------------------"
110110
echo "🔑 Distributing MinIO secrets to all clusters"
111111
echo "--------------------------------------------------"
112112
for target_region in "${REGIONS[@]}"; do
113-
target_cluster_context="kind-${K8S_BASE_NAME}-${target_region}"
113+
target_cluster_context=$(get_cluster_context "${target_region}")
114114
echo " -> Configuring secrets in cluster: ${target_cluster_context}"
115115

116116
for source_minio_name in "${all_minio_names[@]}"; do

scripts/teardown.sh

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -30,7 +30,7 @@ if [ $# -gt 0 ]; then
3030
else
3131
echo "🔎 Auto-detecting all active playground regions for teardown..."
3232
# The '|| true' prevents the script from exiting if grep finds no matches.
33-
REGIONS=($(kind get clusters | grep "^${K8S_BASE_NAME}-" | sed "s/^${K8S_BASE_NAME}-//" || true))
33+
REGIONS=($(kind get clusters | grep "^${K8S_BASE_NAME}" | sed "s/^${K8S_BASE_NAME}//" || true))
3434
fi
3535

3636
if [ ${#REGIONS[@]} -eq 0 ]; then
@@ -41,9 +41,9 @@ fi
4141
echo "🔥 Tearing down regions: ${REGIONS[*]}"
4242

4343
for region in "${REGIONS[@]}"; do
44-
K8S_CLUSTER_NAME="${K8S_BASE_NAME}-${region}"
44+
K8S_CLUSTER_NAME=$(get_cluster_name "${region}")
45+
CONTEXT_NAME=$(get_cluster_context "${region}")
4546
MINIO_CONTAINER_NAME="${MINIO_BASE_NAME}-${region}"
46-
CONTEXT_NAME="kind-${K8S_CLUSTER_NAME}"
4747

4848
echo "--------------------------------------------------"
4949
echo "🔥 Tearing down region: ${region}"

0 commit comments

Comments
 (0)