Set SSL Cert ConfigMap in KOTS and Improve Detection of Unhealthy Pods in Tests (#2239)
* re-add privateCAs value so that KOTS can use it to configure the SDK
* don't use old value and instead just set SSL_CERT_CONFIGMAP env var
* add additional handling to pod validation to not fail on job pods in an error state
* fix job completion validation
* add retry and timeout for pod validation
* allow pending pods in validation for TestMultiNodeHAInstallation test
* fix unbound variable
* bypass pending pods for multinode airgap ha
* fix extraEnv and check for unready pods
* fix dry run test
Changed file: e2e/scripts/common.sh — 109 additions, 5 deletions
@@ -455,12 +455,116 @@ validate_data_dirs() {
455
455
fi
456
456
}
457
457
458
-
# Fail the run when any pod in the cluster is stuck in CrashLoopBackOff.
# Scans all namespaces; on a match it prints the offending pods and exits 1
# so the calling test aborts immediately.
validate_no_pods_in_crashloop() {
    # grep -q: quiet presence test; its exit status drives the conditional.
    if kubectl get pods -A | grep -q CrashLoopBackOff; then
        echo "found pods in CrashLoopBackOff state"
        kubectl get pods -A | grep CrashLoopBackOff
        exit 1
    fi
}
458
+
# Validate that every pod NOT owned by a Job is healthy.
# Job-owned pods are skipped because they may legitimately end up in an
# Error state after retries (the PR adds validate_jobs_completed for them).
# Set ALLOW_PENDING_PODS=true to also tolerate Pending pods (used by the
# multi-node HA tests). Returns 0 when all non-Job pods are healthy, 1
# otherwise; prints the offending pods to stdout.
# NOTE(review): the awk filter stages were truncated in the diff this was
# recovered from and are reconstructed here from the surrounding comments —
# confirm against the upstream e2e/scripts/common.sh.
validate_non_job_pods_healthy() {
    local unhealthy_pods
    local unready_pods
    local has_issues=0  # initialized up front so 'set -u' never trips on it

    # Check for environment variable override (used by specific tests).
    # NB: spaces around '=' are required — '"$x"="true"' is a single
    # non-empty string and would always evaluate to true.
    if [ "${ALLOW_PENDING_PODS:-}" = "true" ]; then
        # Allow Running, Completed, Succeeded, Pending
        unhealthy_pods=$(kubectl get pods -A --no-headers -o custom-columns="NAMESPACE:.metadata.namespace,NAME:.metadata.name,STATUS:.status.phase,OWNER:.metadata.ownerReferences[0].kind" | \
            awk '$4 != "Job" && $3 != "Running" && $3 != "Completed" && $3 != "Succeeded" && $3 != "Pending"')
        if [ -n "$unhealthy_pods" ]; then
            echo "found non-Job pods in unhealthy state:"
            echo "$unhealthy_pods"
            has_issues=1
        else
            echo "All non-Job pods are healthy (allowing Pending pods)"
        fi
    else
        # Default: only allow Running, Completed, Succeeded
        unhealthy_pods=$(kubectl get pods -A --no-headers -o custom-columns="NAMESPACE:.metadata.namespace,NAME:.metadata.name,STATUS:.status.phase,OWNER:.metadata.ownerReferences[0].kind" | \
            awk '$4 != "Job" && $3 != "Running" && $3 != "Completed" && $3 != "Succeeded"')
        if [ -n "$unhealthy_pods" ]; then
            echo "found non-Job pods in unhealthy state:"
            echo "$unhealthy_pods"
            has_issues=1
        fi
    fi

    # Check container readiness for Running pods (skip Completed/Succeeded pods as they don't need to be ready)
    unready_pods=$(kubectl get pods -A --no-headers -o custom-columns="NAMESPACE:.metadata.namespace,NAME:.metadata.name,STATUS:.status.phase,READY:.status.containerStatuses[*].ready,OWNER:.metadata.ownerReferences[0].kind" | \
        awk '$5 != "Job" && $3 == "Running" && $4 ~ /false/')
    if [ -n "$unready_pods" ]; then
        echo "found non-Job pods that are Running but not ready:"
        echo "$unready_pods"
        has_issues=1
    fi

    if [ "$has_issues" -eq 1 ]; then
        return 1
    fi

    return 0
}
499
+
500
+
validate_jobs_completed() {
501
+
local incomplete_jobs
502
+
# Check that all Jobs have succeeded (status.succeeded should equal spec.completions)
503
+
# Flag any job that hasn't fully succeeded
504
+
incomplete_jobs=$(kubectl get jobs -A --no-headers -o custom-columns="NAMESPACE:.metadata.namespace,NAME:.metadata.name,COMPLETIONS:.spec.completions,SUCCESSFUL:.status.succeeded"| \
0 commit comments