Skip to content

Commit ede886b

Browse files
committed
refactor: centralise k8s context and cluster naming logic
Introduces the `get_cluster_context` and `get_cluster_name` helper functions to standardise how names are derived from environment variables.

- Uses K8S_CONTEXT_PREFIX (default: kind-) and K8S_BASE_NAME (default: k8s-).
- Ensures consistent string concatenation across the scripts.

Signed-off-by: Gabriele Bartolini <gabriele.bartolini@enterprisedb.com>
1 parent b346fc2 commit ede886b

File tree

8 files changed

+77
-42
lines changed

8 files changed

+77
-42
lines changed

demo/setup.sh

Lines changed: 19 additions & 13 deletions
Original file line number | Diff line number | Diff line change
@@ -26,6 +26,10 @@
2626
set -eux
2727

2828
git_repo_root=$(git rev-parse --show-toplevel)
29+
30+
# Source the common setup script
31+
source ${git_repo_root}/scripts/common.sh
32+
2933
kube_config_path=${git_repo_root}/k8s/kube-config.yaml
3034
demo_yaml_path=${git_repo_root}/demo/yaml
3135

@@ -61,63 +65,65 @@ export KUBECONFIG=${kube_config_path}
6165
# Begin deployment, one region at a time
6266
for region in eu us; do
6367

68+
CONTEXT_NAME=$(get_cluster_context "${region}")
69+
6470
if [ $trunk -eq 1 ]
6571
then
6672
# Deploy CloudNativePG operator (trunk - main branch)
6773
curl -sSfL \
6874
https://raw.githubusercontent.com/cloudnative-pg/artifacts/main/manifests/operator-manifest.yaml | \
69-
kubectl --context kind-k8s-${region} apply -f - --server-side
75+
kubectl --context ${CONTEXT_NAME} apply -f - --server-side
7076
else
7177
# Deploy CloudNativePG operator (latest version, through the plugin)
7278
kubectl cnpg install generate --control-plane | \
73-
kubectl --context kind-k8s-${region} apply -f - --server-side
79+
kubectl --context ${CONTEXT_NAME} apply -f - --server-side
7480
fi
7581

7682
# Wait for CNPG deployment to complete
77-
kubectl --context kind-k8s-${region} rollout status deployment \
83+
kubectl --context ${CONTEXT_NAME} rollout status deployment \
7884
-n cnpg-system cnpg-controller-manager
7985

8086
# Deploy cert-manager
81-
kubectl apply --context kind-k8s-${region} -f \
87+
kubectl apply --context ${CONTEXT_NAME} -f \
8288
https://github.com/cert-manager/cert-manager/releases/latest/download/cert-manager.yaml
8389

8490
# Wait for cert-manager deployment to complete
85-
kubectl rollout --context kind-k8s-${region} status deployment \
91+
kubectl rollout --context ${CONTEXT_NAME} status deployment \
8692
-n cert-manager
87-
cmctl check api --wait=2m --context kind-k8s-${region}
93+
cmctl check api --wait=2m --context ${CONTEXT_NAME}
8894

8995
if [ $trunk -eq 1 ]
9096
then
9197
# Deploy Barman Cloud Plugin (trunk)
92-
kubectl apply --context kind-k8s-${region} -f \
98+
kubectl apply --context ${CONTEXT_NAME} -f \
9399
https://raw.githubusercontent.com/cloudnative-pg/plugin-barman-cloud/refs/heads/main/manifest.yaml
94100
else
95101
# Deploy Barman Cloud Plugin (latest stable)
96-
kubectl apply --context kind-k8s-${region} -f \
102+
kubectl apply --context ${CONTEXT_NAME} -f \
97103
https://github.com/cloudnative-pg/plugin-barman-cloud/releases/latest/download/manifest.yaml
98104
fi
99105

100106
# Wait for Barman Cloud Plugin deployment to complete
101-
kubectl rollout --context kind-k8s-${region} status deployment \
107+
kubectl rollout --context ${CONTEXT_NAME} status deployment \
102108
-n cnpg-system barman-cloud
103109

104110
# Create Barman object stores
105-
kubectl apply --context kind-k8s-${region} -f \
111+
kubectl apply --context ${CONTEXT_NAME} -f \
106112
${demo_yaml_path}/object-stores
107113

108114
# Create the Postgres cluster
109-
kubectl apply --context kind-k8s-${region} -f \
115+
kubectl apply --context ${CONTEXT_NAME} -f \
110116
${demo_yaml_path}/${region}/pg-${region}${legacy}.yaml
111117

112118
# Create the PodMonitor if Prometheus has been installed
113119
if check_crd_existence podmonitors.monitoring.coreos.com
114120
then
115-
kubectl apply --context kind-k8s-${region} -f \
121+
kubectl apply --context ${CONTEXT_NAME} -f \
116122
${demo_yaml_path}/${region}/pg-${region}-podmonitor.yaml
117123
fi
118124

119125
# Wait for the cluster to be ready
120-
kubectl wait --context kind-k8s-${region} \
126+
kubectl wait --context ${CONTEXT_NAME} \
121127
--timeout 30m \
122128
--for=condition=Ready cluster/pg-${region}
123129

demo/teardown.sh

Lines changed: 11 additions & 5 deletions
Original file line number | Diff line number | Diff line change
@@ -24,6 +24,10 @@
2424
set -ux
2525

2626
git_repo_root=$(git rev-parse --show-toplevel)
27+
28+
# Source the common setup script
29+
source ${git_repo_root}/scripts/common.sh
30+
2731
kube_config_path=${git_repo_root}/k8s/kube-config.yaml
2832
demo_yaml_path=${git_repo_root}/demo/yaml
2933

@@ -34,25 +38,27 @@ export KUBECONFIG=${kube_config_path}
3438
# Delete deployment, one region at a time
3539
for region in eu us; do
3640

41+
CONTEXT_NAME=$(get_cluster_context "${region}")
42+
3743
# Delete the Postgres cluster
38-
kubectl delete --context kind-k8s-${region} -f \
44+
kubectl delete --context ${CONTEXT_NAME} -f \
3945
${demo_yaml_path}/${region}
4046

4147
# Delete Barman object stores
42-
kubectl delete --context kind-k8s-${region} -f \
48+
kubectl delete --context ${CONTEXT_NAME} -f \
4349
${demo_yaml_path}/object-stores
4450

4551
# Delete Barman Cloud Plugin
46-
kubectl delete --context kind-k8s-${region} -f \
52+
kubectl delete --context ${CONTEXT_NAME} -f \
4753
https://github.com/cloudnative-pg/plugin-barman-cloud/releases/latest/download/manifest.yaml
4854

4955
# Delete cert-manager
50-
kubectl delete --context kind-k8s-${region} -f \
56+
kubectl delete --context ${CONTEXT_NAME} -f \
5157
https://github.com/cert-manager/cert-manager/releases/latest/download/cert-manager.yaml
5258

5359
# Delete CNPG operator
5460
kubectl cnpg install generate --control-plane | \
55-
kubectl --context kind-k8s-${region} delete -f -
61+
kubectl --context ${CONTEXT_NAME} delete -f -
5662

5763
# Remove backup data
5864
docker exec minio-${region} rm -rf /data/backups/pg-${region}

monitoring/setup.sh

Lines changed: 10 additions & 9 deletions
Original file line number | Diff line number | Diff line change
@@ -38,17 +38,18 @@ for region in "${REGIONS[@]}"; do
3838
echo " 🔥 Provisioning Prometheus resources for region: ${region}"
3939
echo "-------------------------------------------------------------"
4040

41-
K8S_CLUSTER_NAME="${K8S_BASE_NAME}-${region}"
41+
K8S_CLUSTER_NAME=$(get_cluster_name "${region}")
42+
CONTEXT_NAME=$(get_cluster_context "${region}")
4243

4344
# Deploy the Prometheus operator in the playground Kubernetes clusters
44-
kubectl --context kind-${K8S_CLUSTER_NAME} create ns prometheus-operator || true
45+
kubectl --context ${CONTEXT_NAME} create ns prometheus-operator || true
4546
kubectl kustomize ${GIT_REPO_ROOT}/monitoring/prometheus-operator | \
46-
kubectl --context kind-${K8S_CLUSTER_NAME} apply --force-conflicts --server-side -f -
47+
kubectl --context ${CONTEXT_NAME} apply --force-conflicts --server-side -f -
4748

4849
# We make sure that monitoring workloads are deployed in the infrastructure node.
4950
kubectl kustomize ${GIT_REPO_ROOT}/monitoring/prometheus-instance | \
50-
kubectl --context=kind-${K8S_CLUSTER_NAME} apply --force-conflicts --server-side -f -
51-
kubectl --context=kind-${K8S_CLUSTER_NAME} -n prometheus-operator \
51+
kubectl --context=${CONTEXT_NAME} apply --force-conflicts --server-side -f -
52+
kubectl --context=${CONTEXT_NAME} -n prometheus-operator \
5253
patch deployment prometheus-operator \
5354
--type='merge' \
5455
--patch='{"spec":{"template":{"spec":{"tolerations":[{"key":"node-role.kubernetes.io/infra","operator":"Exists","effect":"NoSchedule"}],"nodeSelector":{"node-role.kubernetes.io/infra":""}}}}}'
@@ -58,16 +59,16 @@ for region in "${REGIONS[@]}"; do
5859
echo "-------------------------------------------------------------"
5960

6061
# Deploying Grafana operator
61-
kubectl --context kind-${K8S_CLUSTER_NAME} apply --force-conflicts --server-side \
62+
kubectl --context ${CONTEXT_NAME} apply --force-conflicts --server-side \
6263
-f https://github.com/grafana/grafana-operator/releases/latest/download/kustomize-cluster_scoped.yaml
63-
kubectl --context kind-${K8S_CLUSTER_NAME} -n grafana \
64+
kubectl --context ${CONTEXT_NAME} -n grafana \
6465
patch deployment grafana-operator-controller-manager \
6566
--type='merge' \
6667
--patch='{"spec":{"template":{"spec":{"tolerations":[{"key":"node-role.kubernetes.io/infra","operator":"Exists","effect":"NoSchedule"}],"nodeSelector":{"node-role.kubernetes.io/infra":""}}}}}'
6768

6869
# Creating Grafana instance and dashboards
6970
kubectl kustomize ${GIT_REPO_ROOT}/monitoring/grafana/ | \
70-
kubectl --context kind-${K8S_CLUSTER_NAME} apply -f -
71+
kubectl --context ${CONTEXT_NAME} apply -f -
7172

7273
# Restart the operator
7374
if kubectl get ns cnpg-system &> /dev/null
@@ -80,7 +81,7 @@ fi
8081
echo " ⏩ To forward the Grafana service for region: ${region} to your localhost"
8182
echo " Wait for the Grafana service to be created and then forward the service"
8283
echo ""
83-
echo " kubectl port-forward service/grafana-service ${port}:3000 -n grafana --context kind-k8s-${region}"
84+
echo " kubectl port-forward service/grafana-service ${port}:3000 -n grafana --context ${CONTEXT_NAME}"
8485
echo ""
8586
echo " You can then connect to the Grafana GUI using"
8687
echo " http://localhost:${port}"

scripts/common.sh

Lines changed: 2 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -23,7 +23,8 @@ set -euo pipefail
2323

2424
# --- Common Configuration ---
2525
# Kind base name for clusters
26-
K8S_BASE_NAME=${K8S_NAME:-k8s}
26+
K8S_CONTEXT_PREFIX=${K8S_CONTEXT_PREFIX-kind-}
27+
K8S_BASE_NAME=${K8S_NAME-k8s-}
2728

2829
# MinIO Configuration
2930
MINIO_IMAGE="${MINIO_IMAGE:-quay.io/minio/minio:RELEASE.2025-09-07T16-13-09Z}"

scripts/funcs_regions.sh

Lines changed: 13 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -45,11 +45,23 @@ detect_running_regions() {
4545
else
4646
echo "🔎 Auto-detecting all active playground regions..."
4747
# The '|| true' prevents the script from exiting if grep finds no matches.
48-
REGIONS=($(kind get clusters | grep "^${K8S_BASE_NAME}-" | sed "s/^${K8S_BASE_NAME}-//" || true))
48+
REGIONS=($(kind get clusters | grep "^${K8S_BASE_NAME}" | sed "s/^${K8S_BASE_NAME}//" || true))
4949
if [ ${#REGIONS[@]} -gt 0 ]; then
5050
echo "✅ Found regions: ${REGIONS[*]}"
5151
else
5252
echo "✅ No region detected"
5353
fi
5454
fi
5555
}
56+
57+
# Helper function that builds the name of the cluster in a standard way given the region
58+
get_cluster_name() {
59+
local region="$1"
60+
echo "${K8S_BASE_NAME}${region}"
61+
}
62+
63+
# Helper function that builds the name of the context in a standard way given the region
64+
get_cluster_context() {
65+
local region="$1"
66+
echo "${K8S_CONTEXT_PREFIX}${K8S_BASE_NAME}${region}"
67+
}

scripts/info.sh

Lines changed: 9 additions & 7 deletions
Original file line number | Diff line number | Diff line change
@@ -43,28 +43,30 @@ echo "export KUBECONFIG=${KUBE_CONFIG_PATH}"
4343
echo
4444
echo "Available cluster contexts:"
4545
for region in "${REGIONS[@]}"; do
46-
echo " • kind-${K8S_BASE_NAME}-${region}"
46+
CONTEXT_NAME=$(get_cluster_context "${region}")
47+
echo "${CONTEXT_NAME}"
4748
done
4849
echo
4950
echo "To switch to a specific cluster (e.g., the '${REGIONS[0]}' region), use:"
50-
echo "kubectl config use-context kind-${K8S_BASE_NAME}-${REGIONS[0]}"
51+
echo "kubectl config use-context $(get_cluster_context ${REGIONS[0]})"
5152
echo
5253

5354
# --- Main Info Loop ---
5455
echo "--------------------------------------------------"
5556
echo "ℹ️ Cluster Information"
5657
echo "--------------------------------------------------"
5758
for region in "${REGIONS[@]}"; do
58-
CONTEXT="kind-${K8S_BASE_NAME}-${region}"
59+
CLUSTER_NAME=$(get_cluster_name "${region}")
60+
CONTEXT_NAME=$(get_cluster_context "${region}")
5961
echo
60-
echo "🔷 Cluster: ${CONTEXT}"
62+
echo "🔷 Cluster: ${CLUSTER_NAME}"
6163
echo "==================================="
6264
echo "🔹 Version:"
63-
kubectl --context "${CONTEXT}" version
65+
kubectl --context "${CONTEXT_NAME}" version
6466
echo
6567
echo "🔹 Nodes:"
66-
kubectl --context "${CONTEXT}" get nodes -o wide
68+
kubectl --context "${CONTEXT_NAME}" get nodes -o wide
6769
echo
6870
echo "🔹 Secrets:"
69-
kubectl --context "${CONTEXT}" get secrets
71+
kubectl --context "${CONTEXT_NAME}" get secrets
7072
done

scripts/setup.sh

Lines changed: 4 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -37,14 +37,14 @@ echo "✅ Prerequisites met. Using '$CONTAINER_PROVIDER' as the container provid
3737
# --- Pre-flight Check ---
3838
echo "🔎 Verifying that no existing playground clusters are running..."
3939
# The '|| true' prevents the script from exiting if grep finds no matches.
40-
existing_count=$(kind get clusters | grep -c "^${K8S_BASE_NAME}-" || true)
40+
existing_count=$(kind get clusters | grep -c "^${K8S_BASE_NAME}" || true)
4141

4242
if [ "${existing_count}" -gt 0 ]; then
4343
echo "❌ Error: Found ${existing_count} existing playground cluster(s)."
4444
echo "Please run './scripts/teardown.sh' to remove the existing environment before running setup."
4545
echo
4646
echo "Found clusters:"
47-
kind get clusters | grep "^${K8S_BASE_NAME}-"
47+
kind get clusters | grep "^${K8S_BASE_NAME}"
4848
exit 1
4949
fi
5050

@@ -70,7 +70,7 @@ for region in "${REGIONS[@]}"; do
7070
echo "🚀 Provisioning resources for region: ${region}"
7171
echo "--------------------------------------------------"
7272

73-
K8S_CLUSTER_NAME="${K8S_BASE_NAME}-${region}"
73+
K8S_CLUSTER_NAME=$(get_cluster_name "${region}")
7474
MINIO_CONTAINER_NAME="${MINIO_BASE_NAME}-${region}"
7575

7676
echo "📦 Creating MinIO container '${MINIO_CONTAINER_NAME}' on host port ${current_minio_port}..."
@@ -107,7 +107,7 @@ echo "--------------------------------------------------"
107107
echo "🔑 Distributing MinIO secrets to all clusters"
108108
echo "--------------------------------------------------"
109109
for target_region in "${REGIONS[@]}"; do
110-
target_cluster_context="kind-${K8S_BASE_NAME}-${target_region}"
110+
target_cluster_context=$(get_cluster_context "${target_region}")
111111
echo " -> Configuring secrets in cluster: ${target_cluster_context}"
112112

113113
for source_minio_name in "${all_minio_names[@]}"; do

scripts/teardown.sh

Lines changed: 9 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -25,10 +25,17 @@ source "$(dirname "$0")/common.sh"
2525
# Determine regions from arguments, or auto-detect if none are provided
2626
detect_running_regions "$@"
2727

28+
if [ ${#REGIONS[@]} -eq 0 ]; then
29+
echo "🤷 No regions found to tear down. Exiting."
30+
exit 0
31+
fi
32+
33+
echo "🔥 Tearing down regions: ${REGIONS[*]}"
34+
2835
for region in "${REGIONS[@]}"; do
29-
K8S_CLUSTER_NAME="${K8S_BASE_NAME}-${region}"
36+
K8S_CLUSTER_NAME=$(get_cluster_name "${region}")
37+
CONTEXT_NAME=$(get_cluster_context "${region}")
3038
MINIO_CONTAINER_NAME="${MINIO_BASE_NAME}-${region}"
31-
CONTEXT_NAME="kind-${K8S_CLUSTER_NAME}"
3239

3340
echo "--------------------------------------------------"
3441
echo "🔥 Tearing down region: ${region}"

0 commit comments

Comments (0)