
Commit 16f26ba

K8SPSMDB-1080 - Use trap to catch exit status
1 parent 31091a0 commit 16f26ba
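
Context: this commit removes the explicit collect_k8s_logs call that preceded each `exit 1` in the e2e test scripts. Per the commit message, log collection is instead triggered by a trap that reacts to the script's exit status; the trap itself is presumably registered in the shared test helpers, which are not part of this excerpt. The snippet below is only an illustrative sketch of that pattern (exit_handler is a hypothetical name, not the repository's actual function):

	# Illustrative sketch only: collect logs once when the script exits
	# with a non-zero status, instead of calling collect_k8s_logs before
	# every `exit 1`.
	exit_handler() {
		local status=$?
		if [[ $status -ne 0 ]]; then
			collect_k8s_logs
		fi
	}
	trap exit_handler EXIT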

File tree

27 files changed: +14, -81 lines
  • e2e-tests
    • arbiter
    • balancer
    • cross-site-sharded
    • data-at-rest-encryption
    • data-sharded
    • default-cr
    • demand-backup-physical-sharded
    • demand-backup-physical
    • demand-backup-sharded
    • demand-backup
    • expose-sharded
    • init-deploy
    • mongod-major-upgrade-sharded
    • mongod-major-upgrade
    • monitoring-2-0
    • multi-cluster-service
    • rs-shard-migration
    • self-healing-chaos
    • service-per-pod
    • smart-update
    • split-horizon
    • tls-issue-cert-manager
    • upgrade-sharded
    • upgrade
    • version-service


Jenkinsfile

Lines changed: 1 addition & 1 deletion
@@ -505,7 +505,7 @@ EOF
 
 		unstash 'IMAGE'
 		def IMAGE = sh(returnStdout: true, script: "cat results/docker/TAG").trim()
-		TestsReport = TestsReport + "\r\n\r\ncommit: ${env.CHANGE_URL}/commits/${env.GIT_COMMIT}\r\nimage: `${IMAGE}`\r\n"
+		TestsReport = TestsReport + "\r\n\r\ncommit: ${env.CHANGE_URL}/commits/${env.GIT_COMMIT}\r\nimage: `${IMAGE}`\r\nlogs: s3://percona-jenkins-artifactory/cloud-psmdb-operator/PR-${env.CHANGE_ID}/${env.GIT_COMMIT}/logs/"
 		pullRequest.comment(TestsReport)
 	}
 }

e2e-tests/arbiter/run

Lines changed: 0 additions & 1 deletion
@@ -31,7 +31,6 @@ check_cr_config() {
 	if [[ $(kubectl_bin get pod \
 		--selector=statefulset.kubernetes.io/pod-name="${cluster}-arbiter-0" \
 		-o jsonpath='{.items[*].status.containerStatuses[?(@.name == "mongod-arbiter")].restartCount}') -gt 0 ]]; then
-		collect_k8s_logs
 		echo "Something went wrong with arbiter. Exiting..."
 		exit 1
 	fi

e2e-tests/balancer/run

Lines changed: 0 additions & 1 deletion
@@ -15,7 +15,6 @@ check_balancer() {
 		| grep -E -v "Percona Server for MongoDB|connecting to:|Implicit session:|versions do not match|Error saving history file:|bye")
 
 	if [[ $balancer_running != "$expected" ]]; then
-		collect_k8s_logs
 		echo "Unexpected output from \"db.adminCommand({balancerStatus: 1}).mode\": $balancer_running"
 		echo "Expected $expected"
 		exit 1

e2e-tests/cross-site-sharded/run

Lines changed: 0 additions & 1 deletion
@@ -101,7 +101,6 @@ for i in "rs0" "rs1"; do
 done
 
 if [[ $shards -lt 2 ]]; then
-	collect_k8s_logs
 	echo "data is only on some of the shards, maybe sharding is not working"
 	exit 1
 fi

e2e-tests/data-at-rest-encryption/run

Lines changed: 0 additions & 2 deletions
@@ -83,7 +83,6 @@ encrypted_cluster_log=$(kubectl_bin logs some-name-rs0-0 -c mongod -n $namespace
 
 echo "$encrypted_cluster_log"
 if [ -z "$encrypted_cluster_log" ]; then
-	collect_k8s_logs
 	echo "Cluster is not encrypted"
 	exit 1
 fi
@@ -100,7 +99,6 @@ until [ "$retry" -ge 10 ]; do
 		echo "Cluster is not encrypted already"
 		break
 	elif [ $retry == 15 ]; then
-		collect_k8s_logs
 		echo "Max retry count $retry reached. Cluster is still encrypted"
 		exit 1
 	else

e2e-tests/data-sharded/run

Lines changed: 0 additions & 3 deletions
@@ -17,7 +17,6 @@ check_rs_proper_component_deletion() {
 	until [[ $(kubectl_bin get sts -l app.kubernetes.io/instance=${cluster},app.kubernetes.io/replset=${rs_name} -ojson | jq '.items | length') -eq 0 ]]; do
 		let retry+=1
 		if [ $retry -ge 70 ]; then
-			collect_k8s_logs
 			sts_count=$(kubectl_bin get sts -l app.kubernetes.io/instance=${cluster},app.kubernetes.io/replset=${rs_name} -ojson | jq '.items | length')
 			echo "Replset $rs_name not properly removed, expected sts count of 0 but got $sts_count. Exiting after $retry tries..."
 			exit 1
@@ -116,7 +115,6 @@ main() {
 	done
 
 	if [[ $shards -lt 3 ]]; then
-		collect_k8s_logs
 		echo "data is only on some of the shards, maybe sharding is not working"
 		exit 1
 	fi
@@ -127,7 +125,6 @@ main() {
 		"clusterAdmin:clusterAdmin123456@$cluster-mongos.$namespace" "mongodb" ".svc.cluster.local" \
 		"--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls")
 	if ! echo $res | grep -q '"ok" : 1'; then
-		collect_k8s_logs
 		echo "app database not dropped. Exiting.."
 		exit 1
 	fi

e2e-tests/default-cr/run

Lines changed: 0 additions & 1 deletion
@@ -27,7 +27,6 @@ function stop_cluster() {
 		let passed_time="${passed_time}+${sleep_time}"
 		sleep ${sleep_time}
 		if [[ ${passed_time} -gt ${max_wait_time} ]]; then
-			collect_k8s_logs
 			echo "We've been waiting for cluster stop for too long. Exiting..."
 			exit 1
 		fi

e2e-tests/demand-backup-physical-sharded/run

Lines changed: 0 additions & 3 deletions
@@ -38,7 +38,6 @@ run_recovery_check() {
 	wait_restore "${backup_name}" "${cluster}" "ready" "0" "1800"
 	kubectl_bin get psmdb ${cluster} -o yaml
 	if [ $(kubectl_bin get psmdb ${cluster} -o yaml | yq '.metadata.annotations."percona.com/resync-pbm"') == null ]; then
-		collect_k8s_logs
 		echo "psmdb/${cluster} should be annotated with percona.com/resync-pbm after a physical restore"
 		exit 1
 	fi
@@ -53,7 +52,6 @@ check_exported_mongos_service_endpoint() {
 	local host=$1
 
 	if [ "$host" != "$(kubectl_bin get psmdb $cluster -o=jsonpath='{.status.host}')" ]; then
-		collect_k8s_logs
 		echo "Exported host is not correct after the restore"
 		exit 1
 	fi
@@ -82,7 +80,6 @@ wait_cluster_consistency ${cluster}
 lbEndpoint=$(kubectl_bin get svc $cluster-mongos -o=jsonpath='{.status}' |
 	jq -r 'select(.loadBalancer != null and .loadBalancer.ingress != null and .loadBalancer.ingress != []) | .loadBalancer.ingress[0][]')
 if [ -z $lbEndpoint ]; then
-	collect_k8s_logs
 	echo "mongos service not exported correctly"
 	exit 1
 fi

e2e-tests/demand-backup-physical/run

Lines changed: 0 additions & 1 deletion
@@ -38,7 +38,6 @@ run_recovery_check() {
 	wait_restore "${backup_name}" "${cluster}" "ready" "0" "1800"
 	kubectl_bin get psmdb ${cluster} -o yaml
 	if [ $(kubectl_bin get psmdb ${cluster} -o yaml | yq '.metadata.annotations."percona.com/resync-pbm"') == null ]; then
-		collect_k8s_logs
 		echo "psmdb/${cluster} should be annotated with percona.com/resync-pbm after a physical restore"
 		exit 1
 	fi

e2e-tests/demand-backup-sharded/run

Lines changed: 0 additions & 1 deletion
@@ -166,7 +166,6 @@ backup_exists=$(kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --rest
 	/usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls s3://operator-testing/ \
 	| grep -c ${backup_dest_minio}_ | cat)
 if [[ $backup_exists -eq 1 ]]; then
-	collect_k8s_logs
 	echo "Backup was not removed from bucket -- minio"
 	exit 1
 fi
