diff --git a/demo/setup.sh b/demo/setup.sh
index 94ae18d..7c46c06 100755
--- a/demo/setup.sh
+++ b/demo/setup.sh
@@ -26,6 +26,10 @@ set -eux
 
 git_repo_root=$(git rev-parse --show-toplevel)
+
+# Source the common setup script
+source ${git_repo_root}/scripts/common.sh
+
 
 kube_config_path=${git_repo_root}/k8s/kube-config.yaml
 demo_yaml_path=${git_repo_root}/demo/yaml
 
@@ -61,63 +65,65 @@ export KUBECONFIG=${kube_config_path}
 
 # Begin deployment, one region at a time
 for region in eu us; do
+  CONTEXT_NAME=$(get_cluster_context "${region}")
+
   if [ $trunk -eq 1 ]
   then
     # Deploy CloudNativePG operator (trunk - main branch)
     curl -sSfL \
       https://raw.githubusercontent.com/cloudnative-pg/artifacts/main/manifests/operator-manifest.yaml | \
-      kubectl --context kind-k8s-${region} apply -f - --server-side
+      kubectl --context ${CONTEXT_NAME} apply -f - --server-side
   else
     # Deploy CloudNativePG operator (latest version, through the plugin)
     kubectl cnpg install generate --control-plane | \
-      kubectl --context kind-k8s-${region} apply -f - --server-side
+      kubectl --context ${CONTEXT_NAME} apply -f - --server-side
   fi
 
   # Wait for CNPG deployment to complete
-  kubectl --context kind-k8s-${region} rollout status deployment \
+  kubectl --context ${CONTEXT_NAME} rollout status deployment \
     -n cnpg-system cnpg-controller-manager
 
   # Deploy cert-manager
-  kubectl apply --context kind-k8s-${region} -f \
+  kubectl apply --context ${CONTEXT_NAME} -f \
    https://github.com/cert-manager/cert-manager/releases/latest/download/cert-manager.yaml
 
   # Wait for cert-manager deployment to complete
-  kubectl rollout --context kind-k8s-${region} status deployment \
+  kubectl rollout --context ${CONTEXT_NAME} status deployment \
     -n cert-manager
-  cmctl check api --wait=2m --context kind-k8s-${region}
+  cmctl check api --wait=2m --context ${CONTEXT_NAME}
 
   if [ $trunk -eq 1 ]
   then
     # Deploy Barman Cloud Plugin (trunk)
-    kubectl apply --context kind-k8s-${region} -f \
+    kubectl apply --context ${CONTEXT_NAME} -f \
       https://raw.githubusercontent.com/cloudnative-pg/plugin-barman-cloud/refs/heads/main/manifest.yaml
   else
     # Deploy Barman Cloud Plugin (latest stable)
-    kubectl apply --context kind-k8s-${region} -f \
+    kubectl apply --context ${CONTEXT_NAME} -f \
       https://github.com/cloudnative-pg/plugin-barman-cloud/releases/latest/download/manifest.yaml
   fi
 
   # Wait for Barman Cloud Plugin deployment to complete
-  kubectl rollout --context kind-k8s-${region} status deployment \
+  kubectl rollout --context ${CONTEXT_NAME} status deployment \
    -n cnpg-system barman-cloud
 
   # Create Barman object stores
-  kubectl apply --context kind-k8s-${region} -f \
+  kubectl apply --context ${CONTEXT_NAME} -f \
    ${demo_yaml_path}/object-stores
 
   # Create the Postgres cluster
-  kubectl apply --context kind-k8s-${region} -f \
+  kubectl apply --context ${CONTEXT_NAME} -f \
    ${demo_yaml_path}/${region}/pg-${region}${legacy}.yaml
 
   # Create the PodMonitor if Prometheus has been installed
   if check_crd_existence podmonitors.monitoring.coreos.com
   then
-    kubectl apply --context kind-k8s-${region} -f \
+    kubectl apply --context ${CONTEXT_NAME} -f \
      ${demo_yaml_path}/${region}/pg-${region}-podmonitor.yaml
   fi
 
   # Wait for the cluster to be ready
-  kubectl wait --context kind-k8s-${region} \
+  kubectl wait --context ${CONTEXT_NAME} \
    --timeout 30m \
    --for=condition=Ready cluster/pg-${region}
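With the defaults introduced below in scripts/common.sh (K8S_CONTEXT_PREFIX="kind-", K8S_BASE_NAME="k8s-"), get_cluster_context reproduces exactly the kind-k8s-${region} context names this script previously hard-coded. A quick sanity check, sketched under the assumption that both helper files are sourced from the repository root:

    source scripts/common.sh
    source scripts/funcs_regions.sh    # defines get_cluster_name / get_cluster_context
    get_cluster_context eu             # prints: kind-k8s-eu
    get_cluster_name us                # prints: k8s-us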
diff --git a/demo/teardown.sh b/demo/teardown.sh
index 97d7f3b..948b21b 100755
--- a/demo/teardown.sh
+++ b/demo/teardown.sh
@@ -24,6 +24,10 @@ set -ux
 
 git_repo_root=$(git rev-parse --show-toplevel)
+
+# Source the common setup script
+source ${git_repo_root}/scripts/common.sh
+
 
 kube_config_path=${git_repo_root}/k8s/kube-config.yaml
 demo_yaml_path=${git_repo_root}/demo/yaml
 
@@ -34,25 +38,27 @@ export KUBECONFIG=${kube_config_path}
 
 # Delete deployment, one region at a time
 for region in eu us; do
+  CONTEXT_NAME=$(get_cluster_context "${region}")
+
   # Delete the Postgres cluster
-  kubectl delete --context kind-k8s-${region} -f \
+  kubectl delete --context ${CONTEXT_NAME} -f \
    ${demo_yaml_path}/${region}
 
   # Delete Barman object stores
-  kubectl delete --context kind-k8s-${region} -f \
+  kubectl delete --context ${CONTEXT_NAME} -f \
    ${demo_yaml_path}/object-stores
 
   # Delete Barman Cloud Plugin
-  kubectl delete --context kind-k8s-${region} -f \
+  kubectl delete --context ${CONTEXT_NAME} -f \
    https://github.com/cloudnative-pg/plugin-barman-cloud/releases/latest/download/manifest.yaml
 
   # Delete cert-manager
-  kubectl delete --context kind-k8s-${region} -f \
+  kubectl delete --context ${CONTEXT_NAME} -f \
    https://github.com/cert-manager/cert-manager/releases/latest/download/cert-manager.yaml
 
   # Delete CNPG operator
   kubectl cnpg install generate --control-plane | \
-    kubectl --context kind-k8s-${region} delete -f -
+    kubectl --context ${CONTEXT_NAME} delete -f -
 
   # Remove backup data
   docker exec minio-${region} rm -rf /data/backups/pg-${region}
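Note that demo/teardown.sh runs under set -ux (no -e), so each delete above is best-effort and a missing resource does not abort the loop. To preview the contexts the loop will target, listing them from the playground kubeconfig should suffice; the output shown assumes the default eu/us regions:

    export KUBECONFIG=k8s/kube-config.yaml
    kubectl config get-contexts -o name
    # kind-k8s-eu
    # kind-k8s-us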
diff --git a/monitoring/setup.sh b/monitoring/setup.sh
index ae4f334..feb1d1c 100755
--- a/monitoring/setup.sh
+++ b/monitoring/setup.sh
@@ -38,17 +38,18 @@ for region in "${REGIONS[@]}"; do
   echo " đŸ”Ĩ Provisioning Prometheus resources for region: ${region}"
   echo "-------------------------------------------------------------"
 
-  K8S_CLUSTER_NAME="${K8S_BASE_NAME}-${region}"
+  K8S_CLUSTER_NAME=$(get_cluster_name "${region}")
+  CONTEXT_NAME=$(get_cluster_context "${region}")
 
   # Deploy the Prometheus operator in the playground Kubernetes clusters
-  kubectl --context kind-${K8S_CLUSTER_NAME} create ns prometheus-operator || true
+  kubectl --context ${CONTEXT_NAME} create ns prometheus-operator || true
   kubectl kustomize ${GIT_REPO_ROOT}/monitoring/prometheus-operator | \
-    kubectl --context kind-${K8S_CLUSTER_NAME} apply --force-conflicts --server-side -f -
+    kubectl --context ${CONTEXT_NAME} apply --force-conflicts --server-side -f -
 
   # We make sure that monitoring workloads are deployed in the infrastructure node.
   kubectl kustomize ${GIT_REPO_ROOT}/monitoring/prometheus-instance | \
-    kubectl --context=kind-${K8S_CLUSTER_NAME} apply --force-conflicts --server-side -f -
-  kubectl --context=kind-${K8S_CLUSTER_NAME} -n prometheus-operator \
+    kubectl --context=${CONTEXT_NAME} apply --force-conflicts --server-side -f -
+  kubectl --context=${CONTEXT_NAME} -n prometheus-operator \
    patch deployment prometheus-operator \
    --type='merge' \
    --patch='{"spec":{"template":{"spec":{"tolerations":[{"key":"node-role.kubernetes.io/infra","operator":"Exists","effect":"NoSchedule"}],"nodeSelector":{"node-role.kubernetes.io/infra":""}}}}}'
@@ -58,16 +59,16 @@ for region in "${REGIONS[@]}"; do
   echo "-------------------------------------------------------------"
 
   # Deploying Grafana operator
-  kubectl --context kind-${K8S_CLUSTER_NAME} apply --force-conflicts --server-side \
+  kubectl --context ${CONTEXT_NAME} apply --force-conflicts --server-side \
    -f https://github.com/grafana/grafana-operator/releases/latest/download/kustomize-cluster_scoped.yaml
-  kubectl --context kind-${K8S_CLUSTER_NAME} -n grafana \
+  kubectl --context ${CONTEXT_NAME} -n grafana \
    patch deployment grafana-operator-controller-manager \
    --type='merge' \
    --patch='{"spec":{"template":{"spec":{"tolerations":[{"key":"node-role.kubernetes.io/infra","operator":"Exists","effect":"NoSchedule"}],"nodeSelector":{"node-role.kubernetes.io/infra":""}}}}}'
 
   # Creating Grafana instance and dashboards
   kubectl kustomize ${GIT_REPO_ROOT}/monitoring/grafana/ | \
-    kubectl --context kind-${K8S_CLUSTER_NAME} apply -f -
+    kubectl --context ${CONTEXT_NAME} apply -f -
 
 # Restart the operator
 if kubectl get ns cnpg-system &> /dev/null
@@ -80,7 +81,7 @@ fi
 echo " ⏩ To forward the Grafana service for region: ${region} to your localhost"
 echo " Wait for the Grafana service to be created and then forward the service"
 echo ""
-echo " kubectl port-forward service/grafana-service ${port}:3000 -n grafana --context kind-k8s-${region}"
+echo " kubectl port-forward service/grafana-service ${port}:3000 -n grafana --context ${CONTEXT_NAME}"
 echo ""
 echo " You can then connect to the Grafana GUI using"
 echo " http://localhost:${port}"
diff --git a/scripts/common.sh b/scripts/common.sh
index b0ce94f..e9a94a9 100644
--- a/scripts/common.sh
+++ b/scripts/common.sh
@@ -23,7 +23,8 @@ set -euo pipefail
 # --- Common Configuration ---
 
 # Kind base name for clusters
-K8S_BASE_NAME=${K8S_NAME:-k8s}
+K8S_CONTEXT_PREFIX=${K8S_CONTEXT_PREFIX-kind-}
+K8S_BASE_NAME=${K8S_NAME-k8s-}
 
 # MinIO Configuration
 MINIO_IMAGE="${MINIO_IMAGE:-quay.io/minio/minio:RELEASE.2025-09-07T16-13-09Z}"
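The move from ${K8S_NAME:-k8s} to ${K8S_NAME-k8s-} (and likewise for the new prefix) is deliberate: the bare - form falls back to the default only when the variable is unset, whereas :- also overrides an empty value. An explicitly empty K8S_CONTEXT_PREFIX therefore survives, which allows dropping the kind- prefix for clusters whose context name equals the cluster name:

    unset K8S_CONTEXT_PREFIX
    echo "${K8S_CONTEXT_PREFIX-kind-}"   # prints: kind-
    K8S_CONTEXT_PREFIX=""
    echo "${K8S_CONTEXT_PREFIX-kind-}"   # prints nothing: the empty prefix is kept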
diff --git a/scripts/funcs_regions.sh b/scripts/funcs_regions.sh
index a4c05d6..b1081db 100755
--- a/scripts/funcs_regions.sh
+++ b/scripts/funcs_regions.sh
@@ -45,7 +45,7 @@ detect_running_regions() {
   else
     echo "🔎 Auto-detecting all active playground regions..."
     # The '|| true' prevents the script from exiting if grep finds no matches.
-    REGIONS=($(kind get clusters | grep "^${K8S_BASE_NAME}-" | sed "s/^${K8S_BASE_NAME}-//" || true))
+    REGIONS=($(kind get clusters | grep "^${K8S_BASE_NAME}" | sed "s/^${K8S_BASE_NAME}//" || true))
     if [ ${#REGIONS[@]} -gt 0 ]; then
       echo "✅ Found regions: ${REGIONS[*]}"
     else
@@ -53,3 +53,15 @@
     fi
   fi
 }
+
+# Helper function that builds the name of the cluster in a standard way given the region
+get_cluster_name() {
+  local region="$1"
+  echo "${K8S_BASE_NAME}${region}"
+}
+
+# Helper function that builds the name of the context in a standard way given the region
+get_cluster_context() {
+  local region="$1"
+  echo "${K8S_CONTEXT_PREFIX}${K8S_BASE_NAME}${region}"
+}
diff --git a/scripts/info.sh b/scripts/info.sh
index 9145683..c90dbdb 100755
--- a/scripts/info.sh
+++ b/scripts/info.sh
@@ -43,11 +43,12 @@ echo "export KUBECONFIG=${KUBE_CONFIG_PATH}"
 echo
 echo "Available cluster contexts:"
 for region in "${REGIONS[@]}"; do
-  echo " â€ĸ kind-${K8S_BASE_NAME}-${region}"
+  CONTEXT_NAME=$(get_cluster_context "${region}")
+  echo " â€ĸ ${CONTEXT_NAME}"
 done
 echo
 echo "To switch to a specific cluster (e.g., the '${REGIONS[0]}' region), use:"
-echo "kubectl config use-context kind-${K8S_BASE_NAME}-${REGIONS[0]}"
+echo "kubectl config use-context $(get_cluster_context ${REGIONS[0]})"
 echo
 
@@ -55,16 +56,17 @@ echo "--------------------------------------------------"
 echo "â„šī¸ Cluster Information"
 echo "--------------------------------------------------"
 for region in "${REGIONS[@]}"; do
-  CONTEXT="kind-${K8S_BASE_NAME}-${region}"
+  CLUSTER_NAME=$(get_cluster_name "${region}")
+  CONTEXT_NAME=$(get_cluster_context "${region}")
   echo
-  echo "🔷 Cluster: ${CONTEXT}"
+  echo "🔷 Cluster: ${CLUSTER_NAME}"
   echo "==================================="
   echo "🔹 Version:"
-  kubectl --context "${CONTEXT}" version
+  kubectl --context "${CONTEXT_NAME}" version
   echo
   echo "🔹 Nodes:"
-  kubectl --context "${CONTEXT}" get nodes -o wide
+  kubectl --context "${CONTEXT_NAME}" get nodes -o wide
   echo
   echo "🔹 Secrets:"
-  kubectl --context "${CONTEXT}" get secrets
+  kubectl --context "${CONTEXT_NAME}" get secrets
 done
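Because both helpers derive everything from K8S_NAME and K8S_CONTEXT_PREFIX, the naming scheme can now be overridden per invocation without touching any script. A hypothetical example (the playground- name is illustrative, not part of this change):

    # Clusters named playground-eu / playground-us, contexts without the kind- prefix
    K8S_NAME="playground-" K8S_CONTEXT_PREFIX="" ./scripts/info.sh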
diff --git a/scripts/setup.sh b/scripts/setup.sh
index 8b84876..94a3064 100755
--- a/scripts/setup.sh
+++ b/scripts/setup.sh
@@ -37,14 +37,14 @@ echo "✅ Prerequisites met. Using '$CONTAINER_PROVIDER' as the container provid
 
 # --- Pre-flight Check ---
 echo "🔎 Verifying that no existing playground clusters are running..."
 # The '|| true' prevents the script from exiting if grep finds no matches.
-existing_count=$(kind get clusters | grep -c "^${K8S_BASE_NAME}-" || true)
+existing_count=$(kind get clusters | grep -c "^${K8S_BASE_NAME}" || true)
 
 if [ "${existing_count}" -gt 0 ]; then
   echo "❌ Error: Found ${existing_count} existing playground cluster(s)."
   echo "Please run './scripts/teardown.sh' to remove the existing environment before running setup."
   echo
   echo "Found clusters:"
-  kind get clusters | grep "^${K8S_BASE_NAME}-"
+  kind get clusters | grep "^${K8S_BASE_NAME}"
   exit 1
 fi
@@ -70,7 +70,7 @@ for region in "${REGIONS[@]}"; do
 
   echo "🚀 Provisioning resources for region: ${region}"
   echo "--------------------------------------------------"
-  K8S_CLUSTER_NAME="${K8S_BASE_NAME}-${region}"
+  K8S_CLUSTER_NAME=$(get_cluster_name "${region}")
   MINIO_CONTAINER_NAME="${MINIO_BASE_NAME}-${region}"
 
   echo "đŸ“Ļ Creating MinIO container '${MINIO_CONTAINER_NAME}' on host port ${current_minio_port}..."
@@ -107,7 +107,7 @@ echo "--------------------------------------------------"
 echo "🔑 Distributing MinIO secrets to all clusters"
 echo "--------------------------------------------------"
 for target_region in "${REGIONS[@]}"; do
-  target_cluster_context="kind-${K8S_BASE_NAME}-${target_region}"
+  target_cluster_context=$(get_cluster_context "${target_region}")
   echo " -> Configuring secrets in cluster: ${target_cluster_context}"
 
   for source_minio_name in "${all_minio_names[@]}"; do
diff --git a/scripts/teardown.sh b/scripts/teardown.sh
index 59b653f..ac4fcde 100755
--- a/scripts/teardown.sh
+++ b/scripts/teardown.sh
@@ -25,10 +25,17 @@ source "$(dirname "$0")/common.sh"
 # Determine regions from arguments, or auto-detect if none are provided
 detect_running_regions "$@"
 
+if [ ${#REGIONS[@]} -eq 0 ]; then
+  echo "🤷 No regions found to tear down. Exiting."
+  exit 0
+fi
+
+echo "đŸ”Ĩ Tearing down regions: ${REGIONS[*]}"
+
 for region in "${REGIONS[@]}"; do
-  K8S_CLUSTER_NAME="${K8S_BASE_NAME}-${region}"
+  K8S_CLUSTER_NAME=$(get_cluster_name "${region}")
+  CONTEXT_NAME=$(get_cluster_context "${region}")
   MINIO_CONTAINER_NAME="${MINIO_BASE_NAME}-${region}"
-  CONTEXT_NAME="kind-${K8S_CLUSTER_NAME}"
 
   echo "--------------------------------------------------"
   echo "đŸ”Ĩ Tearing down region: ${region}"
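For reference, detect_running_regions and the new helpers are inverses of one another under the defaults: detection strips the base name from the kind cluster names, and the helpers rebuild cluster and context names from a region. A sketch of the round trip:

    kind get clusters               # e.g. prints: k8s-eu and k8s-us
    # detect_running_regions  -> REGIONS=(eu us)
    # get_cluster_name eu     -> k8s-eu       (the kind cluster name)
    # get_cluster_context eu  -> kind-k8s-eu  (the kubeconfig context)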