#!/bin/bash

# Balancer test for a sharded PSMDB cluster: verifies the operator
# enables/disables the mongos balancer and that backup/restore keeps
# the balancer in the expected state.

set -o errexit

test_dir=$(realpath "$(dirname "$0")")
. "${test_dir}/../functions"
set_debug
# log: print the given message prefixed with an ISO-8601-style timestamp.
# Arguments: message words, echoed verbatim after the timestamp.
log() {
	# "$@" (not unquoted $*) preserves the caller's exact arguments:
	# no word-splitting, globbing, or whitespace collapsing.
	echo "[$(date +%Y-%m-%dT%H:%M:%S%z)]" "$@"
}
1212
# check_balancer: assert the sharding balancer state of a cluster.
# Globals:   namespace (read) - namespace the cluster runs in.
# Arguments: $1 - cluster name
#            $2 - expected state: "true" (enabled) or "false" (disabled)
#            $3 - optional delay in seconds before checking (default "0")
# Exits 1 when the reported state does not match the expectation.
check_balancer() {
	local cluster=$1
	local expected=$2 # should be "true" (enabled) or "false" (disabled)
	local delay=${3:-"0"}
	local balancer_running

	log "sleeping for ${delay} seconds..."
	sleep "${delay}"

	# Query the config server replset; strip mongosh banner/noise down to
	# the bare true/false printed by sh.getBalancerState().
	balancer_running=$(run_mongosh 'sh.getBalancerState()' "clusterAdmin:clusterAdmin123456@${cluster}-cfg.${namespace}" \
		| grep -E -v 'Warning|cfg' | grep -E 'true|false')

	echo -n "checking if balancer status is ${expected}..."
	if [[ $balancer_running != "$expected" ]]; then
		echo
		log "Unexpected output from \"sh.getBalancerState()\": $balancer_running"
		log "Expected: $expected"
		exit 1
	fi
	echo "OK"
}
2334
# check_backup_and_restore: run a minio backup + restore cycle and verify
# the balancer is disabled while the backup runs and returns to the
# expected state after backup and after restore.
# Arguments: $1 - cluster name
#            $2 - suffix for the backup name (backup-minio-<suffix>)
#            $3 - balancer state ("true"/"false") expected once the
#                 backup/restore operations finish
check_backup_and_restore() {
	local cluster=$1
	local backup_suffix=$2
	local balancer_end_state=$3
	local backup_name="backup-minio-${backup_suffix}"

	log "running backup: ${backup_name}"
	run_backup "minio" "${backup_name}"
	wait_backup "${backup_name}" "requested"

	# The operator stops the balancer for the duration of a backup.
	log "checking if balancer is disabled"
	check_balancer "${cluster}" "false"

	wait_backup "${backup_name}" "ready"

	log "checking if balancer is ${balancer_end_state} after backup"
	check_balancer "${cluster}" "${balancer_end_state}" 10

	log "running restore: restore-${backup_name}"
	run_restore "${backup_name}"
	log "checking if balancer status is not changed"
	check_balancer "${cluster}" "${balancer_end_state}" 4

	wait_restore "${backup_name}" "${cluster}" "ready"

	log "checking if balancer is ${balancer_end_state} after restore"
	check_balancer "${cluster}" "${balancer_end_state}" 10
}
5263
5364main () {
5465 create_infra " $namespace "
5566
67+ deploy_minio
68+ apply_s3_storage_secrets
69+
5670 desc ' create first PSMDB cluster'
5771 cluster=" some-name"
5872 kubectl_bin apply \
5973 -f " $conf_dir /secrets.yml" \
60- -f " $conf_dir /client.yml"
74+ -f " $conf_dir /client-70 .yml"
6175
6276 if version_gt " 1.19" && [ $EKS -ne 1 ]; then
6377 $sed ' s/docker/runc/g' " $conf_dir /container-rc.yaml" | kubectl_bin apply -f -
@@ -70,30 +84,22 @@ main() {
7084 apply_cluster " $test_dir /conf/$cluster -rs0.yml"
7185
7286 desc ' check if all 3 Pods started'
73- wait_for_running $cluster -rs0 3
87+ wait_for_running $cluster -rs0 3 " false "
7488 wait_for_running $cluster -cfg 3 " false"
7589 wait_for_running $cluster -mongos 3
76- sleep 20
77- check_balancer " full"
90+ check_balancer ${cluster} " true" 10
91+
92+ check_backup_and_restore ${cluster} " 0" " true"
7893
7994 desc ' disabling balancer'
8095 kubectl patch psmdb some-name --type=merge -p ' {"spec":{"sharding":{"balancer":{"enabled":false}}}}'
81- sleep 20
82- check_balancer " off"
96+ check_balancer ${cluster} " false" 10
97+
98+ check_backup_and_restore ${cluster} " 1" " false"
8399
84100 desc ' enabling balancer'
85101 kubectl patch psmdb some-name --type=merge -p ' {"spec":{"sharding":{"balancer":{"enabled":true}}}}'
86- sleep 20
87- check_balancer " full"
88-
89- # Add check that servicePerPod creates 3 services for the running cluster
90- desc ' enabling servicePerPod for mongos'
91- kubectl patch psmdb some-name --type=merge -p ' {"spec":{"sharding":{"mongos":{"expose":{"servicePerPod":true}}}}}'
92- wait_for_running $cluster -mongos 3
93- check_service present $cluster -mongos-0
94- check_service present $cluster -mongos-1
95- check_service present $cluster -mongos-2
96- check_service removed $cluster -mongos
102+ check_balancer ${cluster} " true" 10
97103
98104 destroy " $namespace "
99105}
0 commit comments