demo/setup.sh: 32 changes (19 additions, 13 deletions)
@@ -26,6 +26,10 @@
set -eux

git_repo_root=$(git rev-parse --show-toplevel)

+# Source the common setup script
+source ${git_repo_root}/scripts/common.sh

kube_config_path=${git_repo_root}/k8s/kube-config.yaml
demo_yaml_path=${git_repo_root}/demo/yaml

@@ -61,63 +65,65 @@ export KUBECONFIG=${kube_config_path}
# Begin deployment, one region at a time
for region in eu us; do

+CONTEXT_NAME=$(get_cluster_context "${region}")

if [ $trunk -eq 1 ]
then
# Deploy CloudNativePG operator (trunk - main branch)
curl -sSfL \
https://raw.githubusercontent.com/cloudnative-pg/artifacts/main/manifests/operator-manifest.yaml | \
-kubectl --context kind-k8s-${region} apply -f - --server-side
+kubectl --context ${CONTEXT_NAME} apply -f - --server-side
else
# Deploy CloudNativePG operator (latest version, through the plugin)
kubectl cnpg install generate --control-plane | \
-kubectl --context kind-k8s-${region} apply -f - --server-side
+kubectl --context ${CONTEXT_NAME} apply -f - --server-side
fi

# Wait for CNPG deployment to complete
-kubectl --context kind-k8s-${region} rollout status deployment \
+kubectl --context ${CONTEXT_NAME} rollout status deployment \
-n cnpg-system cnpg-controller-manager

# Deploy cert-manager
-kubectl apply --context kind-k8s-${region} -f \
+kubectl apply --context ${CONTEXT_NAME} -f \
https://github.com/cert-manager/cert-manager/releases/latest/download/cert-manager.yaml

# Wait for cert-manager deployment to complete
-kubectl rollout --context kind-k8s-${region} status deployment \
+kubectl rollout --context ${CONTEXT_NAME} status deployment \
-n cert-manager
-cmctl check api --wait=2m --context kind-k8s-${region}
+cmctl check api --wait=2m --context ${CONTEXT_NAME}

if [ $trunk -eq 1 ]
then
# Deploy Barman Cloud Plugin (trunk)
-kubectl apply --context kind-k8s-${region} -f \
+kubectl apply --context ${CONTEXT_NAME} -f \
https://raw.githubusercontent.com/cloudnative-pg/plugin-barman-cloud/refs/heads/main/manifest.yaml
else
# Deploy Barman Cloud Plugin (latest stable)
-kubectl apply --context kind-k8s-${region} -f \
+kubectl apply --context ${CONTEXT_NAME} -f \
https://github.com/cloudnative-pg/plugin-barman-cloud/releases/latest/download/manifest.yaml
fi

# Wait for Barman Cloud Plugin deployment to complete
-kubectl rollout --context kind-k8s-${region} status deployment \
+kubectl rollout --context ${CONTEXT_NAME} status deployment \
-n cnpg-system barman-cloud

# Create Barman object stores
-kubectl apply --context kind-k8s-${region} -f \
+kubectl apply --context ${CONTEXT_NAME} -f \
${demo_yaml_path}/object-stores

# Create the Postgres cluster
-kubectl apply --context kind-k8s-${region} -f \
+kubectl apply --context ${CONTEXT_NAME} -f \
${demo_yaml_path}/${region}/pg-${region}${legacy}.yaml

# Create the PodMonitor if Prometheus has been installed
if check_crd_existence podmonitors.monitoring.coreos.com
then
-kubectl apply --context kind-k8s-${region} -f \
+kubectl apply --context ${CONTEXT_NAME} -f \
${demo_yaml_path}/${region}/pg-${region}-podmonitor.yaml
fi

# Wait for the cluster to be ready
-kubectl wait --context kind-k8s-${region} \
+kubectl wait --context ${CONTEXT_NAME} \
--timeout 30m \
--for=condition=Ready cluster/pg-${region}

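With the defaults introduced in scripts/common.sh (K8S_CONTEXT_PREFIX="kind-", K8S_BASE_NAME="k8s-"), the new helper resolves to the context names that were previously hard-coded, so the deployment loop behaves as before. A minimal sketch, assuming common.sh has been sourced:

    # For region "eu" the helper reproduces the old literal value:
    CONTEXT_NAME=$(get_cluster_context "eu")
    echo "${CONTEXT_NAME}"   # prints: kind-k8s-eu (formerly the hard-coded kind-k8s-${region})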
demo/teardown.sh: 16 changes (11 additions, 5 deletions)
@@ -24,6 +24,10 @@
set -ux

git_repo_root=$(git rev-parse --show-toplevel)

+# Source the common setup script
+source ${git_repo_root}/scripts/common.sh

kube_config_path=${git_repo_root}/k8s/kube-config.yaml
demo_yaml_path=${git_repo_root}/demo/yaml

@@ -34,25 +38,27 @@ export KUBECONFIG=${kube_config_path}
# Delete deployment, one region at a time
for region in eu us; do

+CONTEXT_NAME=$(get_cluster_context "${region}")

# Delete the Postgres cluster
-kubectl delete --context kind-k8s-${region} -f \
+kubectl delete --context ${CONTEXT_NAME} -f \
${demo_yaml_path}/${region}

# Delete Barman object stores
-kubectl delete --context kind-k8s-${region} -f \
+kubectl delete --context ${CONTEXT_NAME} -f \
${demo_yaml_path}/object-stores

# Delete Barman Cloud Plugin
-kubectl delete --context kind-k8s-${region} -f \
+kubectl delete --context ${CONTEXT_NAME} -f \
https://github.com/cloudnative-pg/plugin-barman-cloud/releases/latest/download/manifest.yaml

# Delete cert-manager
-kubectl delete --context kind-k8s-${region} -f \
+kubectl delete --context ${CONTEXT_NAME} -f \
https://github.com/cert-manager/cert-manager/releases/latest/download/cert-manager.yaml

# Delete CNPG operator
kubectl cnpg install generate --control-plane | \
-kubectl --context kind-k8s-${region} delete -f -
+kubectl --context ${CONTEXT_NAME} delete -f -

# Remove backup data
docker exec minio-${region} rm -rf /data/backups/pg-${region}
monitoring/setup.sh: 19 changes (10 additions, 9 deletions)
@@ -38,17 +38,18 @@ for region in "${REGIONS[@]}"; do
echo " 🔥 Provisioning Prometheus resources for region: ${region}"
echo "-------------------------------------------------------------"

-K8S_CLUSTER_NAME="${K8S_BASE_NAME}-${region}"
+K8S_CLUSTER_NAME=$(get_cluster_name "${region}")
+CONTEXT_NAME=$(get_cluster_context "${region}")

# Deploy the Prometheus operator in the playground Kubernetes clusters
-kubectl --context kind-${K8S_CLUSTER_NAME} create ns prometheus-operator || true
+kubectl --context ${CONTEXT_NAME} create ns prometheus-operator || true
kubectl kustomize ${GIT_REPO_ROOT}/monitoring/prometheus-operator | \
-kubectl --context kind-${K8S_CLUSTER_NAME} apply --force-conflicts --server-side -f -
+kubectl --context ${CONTEXT_NAME} apply --force-conflicts --server-side -f -

# We make sure that monitoring workloads are deployed on the infrastructure node.
kubectl kustomize ${GIT_REPO_ROOT}/monitoring/prometheus-instance | \
-kubectl --context=kind-${K8S_CLUSTER_NAME} apply --force-conflicts --server-side -f -
-kubectl --context=kind-${K8S_CLUSTER_NAME} -n prometheus-operator \
+kubectl --context=${CONTEXT_NAME} apply --force-conflicts --server-side -f -
+kubectl --context=${CONTEXT_NAME} -n prometheus-operator \
patch deployment prometheus-operator \
--type='merge' \
--patch='{"spec":{"template":{"spec":{"tolerations":[{"key":"node-role.kubernetes.io/infra","operator":"Exists","effect":"NoSchedule"}],"nodeSelector":{"node-role.kubernetes.io/infra":""}}}}}'
@@ -58,16 +59,16 @@ for region in "${REGIONS[@]}"; do
echo "-------------------------------------------------------------"

# Deploying Grafana operator
-kubectl --context kind-${K8S_CLUSTER_NAME} apply --force-conflicts --server-side \
+kubectl --context ${CONTEXT_NAME} apply --force-conflicts --server-side \
-f https://github.com/grafana/grafana-operator/releases/latest/download/kustomize-cluster_scoped.yaml
-kubectl --context kind-${K8S_CLUSTER_NAME} -n grafana \
+kubectl --context ${CONTEXT_NAME} -n grafana \
patch deployment grafana-operator-controller-manager \
--type='merge' \
--patch='{"spec":{"template":{"spec":{"tolerations":[{"key":"node-role.kubernetes.io/infra","operator":"Exists","effect":"NoSchedule"}],"nodeSelector":{"node-role.kubernetes.io/infra":""}}}}}'

# Creating Grafana instance and dashboards
kubectl kustomize ${GIT_REPO_ROOT}/monitoring/grafana/ | \
-kubectl --context kind-${K8S_CLUSTER_NAME} apply -f -
+kubectl --context ${CONTEXT_NAME} apply -f -

# Restart the operator
if kubectl get ns cnpg-system &> /dev/null
@@ -80,7 +81,7 @@ fi
echo " ⏩ To forward the Grafana service for region: ${region} to your localhost"
echo " Wait for the Grafana service to be created and then forward the service"
echo ""
-echo " kubectl port-forward service/grafana-service ${port}:3000 -n grafana --context kind-k8s-${region}"
+echo " kubectl port-forward service/grafana-service ${port}:3000 -n grafana --context ${CONTEXT_NAME}"
echo ""
echo " You can then connect to the Grafana GUI using"
echo " http://localhost:${port}"
scripts/common.sh: 3 changes (2 additions, 1 deletion)
@@ -23,7 +23,8 @@ set -euo pipefail

# --- Common Configuration ---
# Kind base name for clusters
-K8S_BASE_NAME=${K8S_NAME:-k8s}
+K8S_CONTEXT_PREFIX=${K8S_CONTEXT_PREFIX-kind-}
+K8S_BASE_NAME=${K8S_NAME-k8s-}

# MinIO Configuration
MINIO_IMAGE="${MINIO_IMAGE:-quay.io/minio/minio:RELEASE.2025-09-07T16-13-09Z}"
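Note that the new assignments use the ${VAR-default} expansion (no colon) rather than the previous ${VAR:-default}: without the colon, the default applies only when the variable is unset, so an explicitly empty value survives. The empty-prefix use case is an assumption here (e.g. contexts not created by kind), but the expansion semantics are standard shell behavior, sketched below:

    unset K8S_CONTEXT_PREFIX
    echo "${K8S_CONTEXT_PREFIX-kind-}"    # unset         -> prints: kind-
    K8S_CONTEXT_PREFIX=""
    echo "${K8S_CONTEXT_PREFIX-kind-}"    # set but empty -> prints nothing
    echo "${K8S_CONTEXT_PREFIX:-kind-}"   # ':-' form     -> prints: kind- even when empty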
scripts/funcs_regions.sh: 14 changes (13 additions, 1 deletion)
@@ -45,11 +45,23 @@ detect_running_regions() {
else
echo "🔎 Auto-detecting all active playground regions..."
# The '|| true' prevents the script from exiting if grep finds no matches.
-REGIONS=($(kind get clusters | grep "^${K8S_BASE_NAME}-" | sed "s/^${K8S_BASE_NAME}-//" || true))
+REGIONS=($(kind get clusters | grep "^${K8S_BASE_NAME}" | sed "s/^${K8S_BASE_NAME}//" || true))
if [ ${#REGIONS[@]} -gt 0 ]; then
echo "✅ Found regions: ${REGIONS[*]}"
else
echo "✅ No region detected"
fi
fi
}

+# Helper function that builds the name of the cluster in a standard way given the region
+get_cluster_name() {
+local region="$1"
+echo "${K8S_BASE_NAME}${region}"
+}
+
+# Helper function that builds the name of the context in a standard way given the region
+get_cluster_context() {
+local region="$1"
+echo "${K8S_CONTEXT_PREFIX}${K8S_BASE_NAME}${region}"
+}
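For illustration, a short sketch of what the helpers return once scripts/common.sh and this file are sourced (the prod- override is hypothetical):

    # With the defaults (K8S_CONTEXT_PREFIX="kind-", K8S_BASE_NAME="k8s-"):
    get_cluster_name "eu"       # prints: k8s-eu
    get_cluster_context "eu"    # prints: kind-k8s-eu

    # If K8S_CONTEXT_PREFIX="" and K8S_NAME="prod-" were exported before
    # sourcing common.sh, the same call would print: prod-eu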
scripts/info.sh: 16 changes (9 additions, 7 deletions)
@@ -43,28 +43,30 @@ echo "export KUBECONFIG=${KUBE_CONFIG_PATH}"
echo
echo "Available cluster contexts:"
for region in "${REGIONS[@]}"; do
-echo " • kind-${K8S_BASE_NAME}-${region}"
+CONTEXT_NAME=$(get_cluster_context "${region}")
+echo " • ${CONTEXT_NAME}"
done
echo
echo "To switch to a specific cluster (e.g., the '${REGIONS[0]}' region), use:"
-echo "kubectl config use-context kind-${K8S_BASE_NAME}-${REGIONS[0]}"
+echo "kubectl config use-context $(get_cluster_context ${REGIONS[0]})"
echo

# --- Main Info Loop ---
echo "--------------------------------------------------"
echo "ℹ️ Cluster Information"
echo "--------------------------------------------------"
for region in "${REGIONS[@]}"; do
-CONTEXT="kind-${K8S_BASE_NAME}-${region}"
+CLUSTER_NAME=$(get_cluster_name "${region}")
+CONTEXT_NAME=$(get_cluster_context "${region}")
echo
-echo "🔷 Cluster: ${CONTEXT}"
+echo "🔷 Cluster: ${CLUSTER_NAME}"
echo "==================================="
echo "🔹 Version:"
-kubectl --context "${CONTEXT}" version
+kubectl --context "${CONTEXT_NAME}" version
echo
echo "🔹 Nodes:"
-kubectl --context "${CONTEXT}" get nodes -o wide
+kubectl --context "${CONTEXT_NAME}" get nodes -o wide
echo
echo "🔹 Secrets:"
-kubectl --context "${CONTEXT}" get secrets
+kubectl --context "${CONTEXT_NAME}" get secrets
done
scripts/setup.sh: 8 changes (4 additions, 4 deletions)
@@ -37,14 +37,14 @@ echo "✅ Prerequisites met. Using '$CONTAINER_PROVIDER' as the container provid
# --- Pre-flight Check ---
echo "🔎 Verifying that no existing playground clusters are running..."
# The '|| true' prevents the script from exiting if grep finds no matches.
-existing_count=$(kind get clusters | grep -c "^${K8S_BASE_NAME}-" || true)
+existing_count=$(kind get clusters | grep -c "^${K8S_BASE_NAME}" || true)

if [ "${existing_count}" -gt 0 ]; then
echo "❌ Error: Found ${existing_count} existing playground cluster(s)."
echo "Please run './scripts/teardown.sh' to remove the existing environment before running setup."
echo
echo "Found clusters:"
-kind get clusters | grep "^${K8S_BASE_NAME}-"
+kind get clusters | grep "^${K8S_BASE_NAME}"
exit 1
fi

@@ -70,7 +70,7 @@ for region in "${REGIONS[@]}"; do
echo "🚀 Provisioning resources for region: ${region}"
echo "--------------------------------------------------"

-K8S_CLUSTER_NAME="${K8S_BASE_NAME}-${region}"
+K8S_CLUSTER_NAME=$(get_cluster_name "${region}")
MINIO_CONTAINER_NAME="${MINIO_BASE_NAME}-${region}"

echo "📦 Creating MinIO container '${MINIO_CONTAINER_NAME}' on host port ${current_minio_port}..."
@@ -107,7 +107,7 @@ echo "--------------------------------------------------"
echo "🔑 Distributing MinIO secrets to all clusters"
echo "--------------------------------------------------"
for target_region in "${REGIONS[@]}"; do
-target_cluster_context="kind-${K8S_BASE_NAME}-${target_region}"
+target_cluster_context=$(get_cluster_context "${target_region}")
echo " -> Configuring secrets in cluster: ${target_cluster_context}"

for source_minio_name in "${all_minio_names[@]}"; do
scripts/teardown.sh: 11 changes (9 additions, 2 deletions)
@@ -25,10 +25,17 @@ source "$(dirname "$0")/common.sh"
# Determine regions from arguments, or auto-detect if none are provided
detect_running_regions "$@"

+if [ ${#REGIONS[@]} -eq 0 ]; then
+echo "🤷 No regions found to tear down. Exiting."
+exit 0
+fi

+echo "🔥 Tearing down regions: ${REGIONS[*]}"

for region in "${REGIONS[@]}"; do
-K8S_CLUSTER_NAME="${K8S_BASE_NAME}-${region}"
+K8S_CLUSTER_NAME=$(get_cluster_name "${region}")
+CONTEXT_NAME=$(get_cluster_context "${region}")
MINIO_CONTAINER_NAME="${MINIO_BASE_NAME}-${region}"
-CONTEXT_NAME="kind-${K8S_CLUSTER_NAME}"

echo "--------------------------------------------------"
echo "🔥 Tearing down region: ${region}"