Commit 0361166

Merge of 2 parents: 6a08e0f + b37c2f6

File tree: 3 files changed (+84, -89 lines)

.github/workflows/destroy.yml

Lines changed: 45 additions & 77 deletions
@@ -424,117 +424,85 @@ jobs:
 run: |
   #!/bin/bash

-  echo "🗑️ Starting enhanced namespace deletion process..."
+  echo "Starting namespace deletion process..."

-  # Function to completely force delete a namespace
-  force_delete_namespace() {
+  # Function to delete a namespace with proper error handling
+  delete_namespace() {
     local ns=$1
-    echo "========== Processing namespace: $ns =========="
+    echo "Processing namespace: $ns"

+    # Check if namespace exists
     if ! kubectl get namespace "$ns" &>/dev/null; then
-      echo "Namespace $ns does not exist, skipping..."
+      echo "Namespace $ns does not exist, skipping..."
       return 0
     fi

-    echo "📋 Current namespace status:"
-    kubectl get namespace $ns -o wide || true
+    echo "Namespace $ns exists, proceeding with deletion..."

-    # Step 1: Final resource cleanup in the namespace
-    echo "🧹 Final cleanup of all resources in namespace $ns..."
+    # Step 1: Remove finalizers from all resources
+    echo "Removing finalizers from resources in $ns..."
+    kubectl api-resources --verbs=list --namespaced -o name 2>/dev/null | \
+      grep -v events | \
+      xargs -I {} bash -c "kubectl get {} -n $ns -o name 2>/dev/null | xargs -I {} kubectl patch {} -n $ns -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge 2>/dev/null || true"

-    # Remove finalizers from all resources in the namespace
-    for resource_type in $(kubectl api-resources --verbs=list --namespaced -o name 2>/dev/null | grep -v events); do
-      kubectl get $resource_type -n $ns -o json 2>/dev/null | \
-        jq -r '.items[]? | select(.metadata.finalizers) | .metadata.name' 2>/dev/null | \
-        while read resource_name; do
-          if [[ -n "$resource_name" ]]; then
-            echo " Removing finalizers from $resource_type/$resource_name"
-            kubectl patch $resource_type $resource_name -n $ns -p '{"metadata":{"finalizers":[]}}' --type=merge 2>/dev/null || true
-          fi
-        done
-    done
+    # Step 2: Delete the namespace with extended timeout
+    echo "Deleting namespace $ns..."
+    if kubectl delete namespace "$ns" --timeout=180s --ignore-not-found; then
+      echo "Successfully deleted namespace $ns"
+      return 0
+    fi

-    # Step 2: Try graceful deletion first
-    echo "🔄 Attempting graceful namespace deletion..."
-    kubectl delete namespace $ns --timeout=60s --ignore-not-found &
-    DELETE_PID=$!
+    # Step 3: Force deletion if graceful deletion failed
+    echo "Graceful deletion failed, attempting force deletion..."

-    # Wait for graceful deletion
-    sleep 30
+    # Remove namespace finalizers
+    kubectl patch namespace "$ns" -p '{"metadata":{"finalizers":[]}}' --type=merge 2>/dev/null || true

-    # Step 3: If still exists, force delete
-    if kubectl get namespace $ns --ignore-not-found 2>/dev/null; then
-      echo "⚡ Graceful deletion failed, forcing deletion..."
-
-      # Kill the background delete process
-      kill $DELETE_PID 2>/dev/null || true
-
-      # Get current namespace JSON and remove finalizers
-      kubectl get namespace $ns -o json | \
-        jq 'del(.spec.finalizers[])' | \
-        kubectl replace --raw "/api/v1/namespaces/$ns/finalize" -f - 2>/dev/null || true
-
-      # Alternative approach - patch the namespace directly
-      kubectl patch namespace $ns -p '{"metadata":{"finalizers":[]}}' --type=merge 2>/dev/null || true
-
-      # Wait a bit more
-      sleep 15
-
-      # Final check and force if needed
-      if kubectl get namespace $ns --ignore-not-found 2>/dev/null; then
-        echo "🚨 Trying nuclear option - direct deletion..."
-
-        # Delete the namespace object directly
-        kubectl delete namespace $ns --force --grace-period=0 2>/dev/null || true
-
-        # Patch with empty spec
-        kubectl patch namespace $ns -p '{"spec":{"finalizers":[]}}' --type=merge 2>/dev/null || true
-        kubectl patch namespace $ns -p '{"metadata":{"finalizers":[]}}' --type=merge 2>/dev/null || true
-
-        sleep 10
-      fi
-    fi
+    # Force delete with zero grace period
+    kubectl delete namespace "$ns" --force --grace-period=0 2>/dev/null || true

-    # Final verification
-    if kubectl get namespace $ns --ignore-not-found 2>/dev/null; then
-      echo "❌ WARNING: Namespace $ns still exists after all attempts"
-      echo "📋 Final namespace details:"
-      kubectl get namespace $ns -o yaml || true
+    # Wait a moment and check
+    sleep 10
+
+    if kubectl get namespace "$ns" &>/dev/null; then
+      echo "WARNING: Namespace $ns still exists after force deletion attempts"
       return 1
     else
-      echo "Successfully deleted namespace $ns"
+      echo "Successfully force-deleted namespace $ns"
       return 0
     fi
   }

   # Array of namespaces to delete
-  NAMESPACES=("${{ vars.APP_NAMESPACE }}" "${{ vars.MONITORING_NAMESPACE }}" "${{ vars.ARGOCD_NAMESPACE }}" "ingress-nginx" "${{ vars.KARPENTER_NAMESPACE }}")
+  NAMESPACES=("my-solar-system-app-namespace" "my-solar-system-app-monitoring" "my-solar-system-app-argocd" "ingress-nginx" "karpenter")

-  # Delete each namespace
+  # Track failed deletions
   FAILED_NAMESPACES=()
+
+  # Delete each namespace
   for ns in "${NAMESPACES[@]}"; do
     if [[ -n "$ns" ]]; then
-      if ! force_delete_namespace "$ns"; then
+      if ! delete_namespace "$ns"; then
         FAILED_NAMESPACES+=("$ns")
       fi
-      echo ""
+      echo "----------------------------------------"
     fi
   done

   # Summary
-  echo "========== NAMESPACE CLEANUP SUMMARY =========="
-  echo "📊 Remaining namespaces:"
-  kubectl get namespaces || true
+  echo "CLEANUP SUMMARY"
+  echo "Remaining namespaces:"
+  kubectl get namespaces 2>/dev/null || echo "Could not list namespaces"

   if [[ ${#FAILED_NAMESPACES[@]} -eq 0 ]]; then
-    echo "✅ All target namespaces successfully deleted!"
+    echo "All target namespaces successfully deleted"
+    exit 0
   else
-    echo "❌ Failed to delete namespaces: ${FAILED_NAMESPACES[*]}"
-    echo "⚠️ You may need to check these manually after terraform destroy completes"
-    # Don't fail the workflow for namespace cleanup issues
+    echo "Failed to delete namespaces: ${FAILED_NAMESPACES[*]}"
+    echo "These may need manual cleanup"
+    # Don't fail the workflow - this is cleanup, not critical infrastructure
+    exit 0
   fi
-
-  echo "✅ Namespace deletion process completed"
 continue-on-error: true

 # ==================================================
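For reference, the graceful-then-force flow that replaces the old force_delete_namespace logic can be exercised outside the workflow. Below is a minimal sketch, assuming kubectl is already configured against the target cluster; "demo-namespace" is a placeholder, not a namespace from this project:

#!/usr/bin/env bash
# Minimal standalone sketch of the delete_namespace pattern above.
# Assumptions: kubectl points at the intended cluster; "demo-namespace" is a placeholder name.
set -uo pipefail

ns="demo-namespace"

# Skip quietly if the namespace is already gone
if ! kubectl get namespace "$ns" &>/dev/null; then
  echo "Namespace $ns does not exist"
  exit 0
fi

# 1. Graceful deletion with an extended timeout
if kubectl delete namespace "$ns" --timeout=180s --ignore-not-found; then
  echo "Deleted $ns gracefully"
  exit 0
fi

# 2. Strip namespace finalizers, then force delete with zero grace period
kubectl patch namespace "$ns" -p '{"metadata":{"finalizers":[]}}' --type=merge 2>/dev/null || true
kubectl delete namespace "$ns" --force --grace-period=0 2>/dev/null || true

# 3. Report whatever is left
sleep 10
if kubectl get namespace "$ns" &>/dev/null; then
  echo "WARNING: $ns still present"
else
  echo "Force-deleted $ns"
fi

The synchronous kubectl delete with --timeout=180s replaces the background delete plus fixed sleep of the previous version, so the force path only runs once the graceful attempt has actually failed.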

.github/workflows/endpoints.yml

Lines changed: 37 additions & 10 deletions
@@ -55,7 +55,9 @@ jobs:
 APP_HOST=$(kubectl get svc ${{ inputs.app_name }}-svc -n ${{ inputs.app_namespace }} -o jsonpath='{.status.loadBalancer.ingress[0].hostname}' 2>/dev/null || echo 'Not found')

 # Monitoring ingress information
-NGINX_HOSTNAME=$(kubectl get svc -n ingress-nginx ingress-nginx-controller -o jsonpath='{.status.loadBalancer.ingress[0].hostname}')
+MONITORING_DOMAIN=$(kubectl get ingress -n ${{ inputs.monitoring_namespace }} -o jsonpath='{.items[0].spec.rules[0].host}' 2>/dev/null || echo 'Not found')
+INGRESS_IP=$(kubectl get ingress -n ${{ inputs.monitoring_namespace }} -o jsonpath='{.items[0].status.loadBalancer.ingress[0].ip}' 2>/dev/null || echo '')
+INGRESS_HOSTNAME=$(kubectl get ingress -n ${{ inputs.monitoring_namespace }} -o jsonpath='{.items[0].status.loadBalancer.ingress[0].hostname}' 2>/dev/null || echo '')

 # NGINX ingress controller LoadBalancer
 NGINX_LB=$(kubectl get svc -n ingress-nginx ingress-nginx-controller -o jsonpath='{.status.loadBalancer.ingress[0].hostname}' 2>/dev/null || echo 'Not found')

@@ -65,23 +67,48 @@
 echo "🌟 App: http://$APP_HOST"
 echo ""
 echo "📊 Monitoring Services (via Ingress):"
-if [ "$NGINX_HOSTNAME" != "Not found" ]; then
-  echo " Domain: $NGINX_HOSTNAME"
-  echo " 📊 Prometheus: http://$NGINX_HOSTNAME/prometheus"
-  echo " 📈 Grafana: http://$NGINX_HOSTNAME/grafana"
-  echo " 🚨 Alertmanager: http://$NGINX_HOSTNAME/alertmanager"
+if [ "$MONITORING_DOMAIN" != "Not found" ]; then
+  echo " Domain: $MONITORING_DOMAIN"
+  echo " 📊 Prometheus: http://$MONITORING_DOMAIN/prometheus"
+  echo " 📈 Grafana: http://$MONITORING_DOMAIN/grafana"
+  echo " 🚨 Alertmanager: http://$MONITORING_DOMAIN/alertmanager"
+  echo ""
+  echo " Ingress Status:"
+  [ ! -z "$INGRESS_IP" ] && echo " IP: $INGRESS_IP"
+  [ ! -z "$INGRESS_HOSTNAME" ] && echo " LoadBalancer: $INGRESS_HOSTNAME"
+else
+  echo " ⚠️ Ingress not found or not ready"
 fi
+
 echo ""
 echo "🌐 NGINX Ingress Controller: $NGINX_LB"

+echo ""
+echo "================= DETAILED INGRESS STATUS ================="
+echo "All Ingress Resources:"
+kubectl get ingress -A -o wide 2>/dev/null || echo "No ingress resources found"
+echo ""
+echo "Monitoring Namespace Ingress Details:"
+kubectl describe ingress -n ${{ inputs.monitoring_namespace }} 2>/dev/null || echo "No ingress in monitoring namespace"
+
+echo ""
+echo "================= SERVICE STATUS ================="
+echo "Monitoring Services (should be ClusterIP):"
+kubectl get svc -n ${{ inputs.monitoring_namespace }} -l app.kubernetes.io/instance=kube-prometheus-stack 2>/dev/null || echo "No monitoring services found"
+
+echo ""
+echo "All LoadBalancer Services:"
+kubectl get svc -A --field-selector spec.type=LoadBalancer -o wide 2>/dev/null || echo "No LoadBalancer services found"
+
+echo ""
+echo "NGINX Ingress Controller Status:"
+kubectl get svc -n ingress-nginx -o wide 2>/dev/null || echo "NGINX Ingress Controller not found"
+
 echo ""
 echo "================= DEFAULT CREDENTIALS ================="
 ARGOCD_PASS=$(kubectl -n ${{ inputs.argocd_namespace }} get secret argocd-initial-admin-secret -o jsonpath='{.data.password}' 2>/dev/null | base64 -d || echo 'Not found')
 echo "ArgoCD -> Username: admin"
 echo "ArgoCD -> Password: $ARGOCD_PASS"
 echo "Grafana -> Username: admin"
-echo "Grafana -> Password: (stored in GitHub Secrets, not printed)"
+echo "Grafana -> Password: ${{ secrets.GRAFANA_PASSWORD }}"
 echo "Prometheus -> No login needed (anonymous access by default)"
-echo "Alertmanager -> No login needed (anonymous access by default)"
-echo ""
-echo "⚠️ Note: It may take a few minutes for LoadBalancer endpoints to become fully available after deployment."

Terraform/terraform.tfvars

Lines changed: 2 additions & 2 deletions
@@ -28,7 +28,7 @@ node_groups = {
 ssh_key_name = "MyPairKey"
 }
 }
-
+
 # Kubernetes Namespace
 app_namespace = "my-solar-system-app-namespace"
 monitoring_namespace = "my-solar-system-app-monitoring"

@@ -46,4 +46,4 @@ karpenter_namespace = "karpenter"
 karpenter_controller_cpu_request = "200m" # Reduced from 500m
 karpenter_controller_memory_request = "256Mi" # Reduced from 512Mi
 karpenter_controller_cpu_limit = "500m" # Reduced from 1
-karpenter_controller_memory_limit = "512Mi" # Reduced from 1Gi
+karpenter_controller_memory_limit = "512Mi" # Reduced from 1Gi
