
Commit 60691b3

K8SPSMDB-1212: Don't stop balancer before restore

1 parent 9f10d86

File tree: 17 files changed, +286 / -100 lines
New file — 9 additions, 0 deletions

@@ -0,0 +1,9 @@
+apiVersion: psmdb.percona.com/v1
+kind: PerconaServerMongoDBBackup
+metadata:
+  finalizers:
+    - percona.com/delete-backup
+  name: backup-minio
+spec:
+  clusterName: some-name
+  storageName: minio
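The new backup manifest can also be exercised by hand outside the test harness. A minimal sketch, assuming the CR is saved as backup-minio.yml (the file name is hypothetical) and that the operator's psmdb-backup short name is registered:

# Apply the backup CR and poll its state; the test below waits for "requested" and then "ready".
kubectl apply -f backup-minio.yml
kubectl get psmdb-backup backup-minio -o jsonpath='{.status.state}'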
New file — 7 additions, 0 deletions

@@ -0,0 +1,7 @@
+apiVersion: psmdb.percona.com/v1
+kind: PerconaServerMongoDBRestore
+metadata:
+  name:
+spec:
+  clusterName: some-name
+  backupName:
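The name and backupName fields are intentionally left blank; the test fills them in per run via its run_restore helper. For illustration only, a filled-in restore applied directly (values here are hypothetical, matching the backup naming used later in the test):

cat <<EOF | kubectl apply -f -
apiVersion: psmdb.percona.com/v1
kind: PerconaServerMongoDBRestore
metadata:
  name: restore-backup-minio-0
spec:
  clusterName: some-name
  backupName: backup-minio-0
EOF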

e2e-tests/balancer/conf/some-name-rs0.yml

11 additions, 1 deletion

@@ -7,7 +7,17 @@ spec:
   image:
   imagePullPolicy: Always
   backup:
-    enabled: false
+    enabled: true
+    image: perconalab/percona-server-mongodb-operator:1.1.0-backup
+    storages:
+      minio:
+        type: s3
+        s3:
+          credentialsSecret: minio-secret
+          region: us-east-1
+          bucket: operator-testing
+          endpointUrl: http://minio-service:9000/
+          insecureSkipTLSVerify: false
   sharding:
     enabled: true
     configsvrReplSet:
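The storage block references a credentialsSecret named minio-secret, which the test creates through apply_s3_storage_secrets. A rough manual equivalent, assuming the operator reads the usual AWS-style key names (the values below are placeholders):

# Hypothetical sketch of the S3 credentials Secret referenced above.
kubectl create secret generic minio-secret \
    --from-literal=AWS_ACCESS_KEY_ID=some-access-key \
    --from-literal=AWS_SECRET_ACCESS_KEY=some-secret-key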

e2e-tests/balancer/run

57 additions, 51 deletions

@@ -1,63 +1,77 @@
 #!/bin/bash

 set -o errexit
-set -o xtrace

 test_dir=$(realpath "$(dirname "$0")")
 . "${test_dir}/../functions"
 set_debug

-check_balancer() {
-    local expected=$1 # should be "full" (running balancer) or "off" (disabled balancer)
+log() {
+    echo "[$(date +%Y-%m-%dT%H:%M:%S%z)]" $*
+}

+check_balancer() {
+    local cluster=$1
+    local expected=$2 # should be "true" (enabled) or "false" (disabled)
+    local delay=${3:-"0"}
     local balancer_running
-    balancer_running=$(run_mongos 'db.adminCommand({balancerStatus: 1}).mode' "clusterAdmin:clusterAdmin123456@$cluster-mongos.$namespace" \
-        | grep -E -v "Percona Server for MongoDB|connecting to:|Implicit session:|versions do not match|Error saving history file:|bye")

+    log "sleeping for ${delay} seconds..."
+    sleep ${delay}
+
+    balancer_running=$(run_mongosh 'sh.getBalancerState()' "clusterAdmin:clusterAdmin123456@${cluster}-cfg.${namespace}" \
+        | grep -E -v 'Warning|cfg' | grep -E 'true|false')
+
+    echo -n "checking if balancer status is ${expected}..."
     if [[ $balancer_running != "$expected" ]]; then
-        echo "Unexpected output from \"db.adminCommand({balancerStatus: 1}).mode\": $balancer_running"
-        echo "Expected $expected"
+        echo
+        log "Unexpected output from \"sh.getBalancerState()\": $balancer_running"
+        log "Expected: $expected"
         exit 1
     fi
+    echo "OK"
 }

-check_service() {
-    state=$1
-    svc_name=$2
-    if [ $state = "present" ]; then
-        echo -n "check that $svc_name was created"
-        local timeout=0
-        until kubectl_bin get service/$svc_name -o 'jsonpath={.spec.type}' 2>&1 | grep -vq NotFound; do
-            sleep 1
-            timeout=$((timeout + 1))
-            echo -n '.'
-            if [[ ${timeout} -gt 900 ]]; then
-                echo "Waiting timeout has been reached. Service $svc_name is not present. Exiting..."
-                exit 1
-            fi
-        done
-        echo ".OK"
-    elif [ $state = "removed" ]; then
-        echo -n "check that $svc_name was removed"
-        if [[ -z $(kubectl_bin get service/$svc_name -o 'jsonpath={.spec.type}' 2>&1 | grep NotFound) ]]; then
-            echo "$svc_name was not removed."
-            exit 1
-        else
-            echo ".OK"
-        fi
-    else
-        echo "unknown state $state"
-    fi
+check_backup_and_restore() {
+    local cluster=$1
+    local backup_suffix=$2
+    local balancer_end_state=$3
+    local backup_name="backup-minio-${backup_suffix}"
+
+    log "running backup: ${backup_name}"
+    run_backup "minio" "${backup_name}"
+    wait_backup "${backup_name}" "requested"
+
+    log "checking if balancer is disabled"
+    check_balancer ${cluster} "false"
+
+    wait_backup "${backup_name}" "ready"
+
+    log "checking if balancer is ${balancer_end_state} after backup"
+    check_balancer ${cluster} ${balancer_end_state} 10
+
+    log "running restore: restore-${backup_name}"
+    run_restore "${backup_name}"
+    log "checking if balancer status is not changed"
+    check_balancer ${cluster} "${balancer_end_state}" 4
+
+    wait_restore ${backup_name} ${cluster} "ready"
+
+    log "checking if balancer is ${balancer_end_state} after restore"
+    check_balancer ${cluster} ${balancer_end_state} 10
 }

 main() {
     create_infra "$namespace"

+    deploy_minio
+    apply_s3_storage_secrets
+
     desc 'create first PSMDB cluster'
     cluster="some-name"
     kubectl_bin apply \
         -f "$conf_dir/secrets.yml" \
-        -f "$conf_dir/client.yml"
+        -f "$conf_dir/client-70.yml"

     if version_gt "1.19" && [ $EKS -ne 1 ]; then
         $sed 's/docker/runc/g' "$conf_dir/container-rc.yaml" | kubectl_bin apply -f -
@@ -70,30 +84,22 @@ main() {
     apply_cluster "$test_dir/conf/$cluster-rs0.yml"

     desc 'check if all 3 Pods started'
-    wait_for_running $cluster-rs0 3
+    wait_for_running $cluster-rs0 3 "false"
     wait_for_running $cluster-cfg 3 "false"
     wait_for_running $cluster-mongos 3
-    sleep 20
-    check_balancer "full"
+    check_balancer ${cluster} "true" 10
+
+    check_backup_and_restore ${cluster} "0" "true"

     desc 'disabling balancer'
     kubectl patch psmdb some-name --type=merge -p '{"spec":{"sharding":{"balancer":{"enabled":false}}}}'
-    sleep 20
-    check_balancer "off"
+    check_balancer ${cluster} "false" 10
+
+    check_backup_and_restore ${cluster} "1" "false"

     desc 'enabling balancer'
     kubectl patch psmdb some-name --type=merge -p '{"spec":{"sharding":{"balancer":{"enabled":true}}}}'
-    sleep 20
-    check_balancer "full"
-
-    # Add check that servicePerPod creates 3 services for the running cluster
-    desc 'enabling servicePerPod for mongos'
-    kubectl patch psmdb some-name --type=merge -p '{"spec":{"sharding":{"mongos":{"expose":{"servicePerPod":true}}}}}'
-    wait_for_running $cluster-mongos 3
-    check_service present $cluster-mongos-0
-    check_service present $cluster-mongos-1
-    check_service present $cluster-mongos-2
-    check_service removed $cluster-mongos
+    check_balancer ${cluster} "true" 10

     destroy "$namespace"
 }
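For debugging outside the test, the new balancer check can be approximated directly with mongosh from the client Deployment. A rough sketch, assuming the psmdb-client Deployment from client-70.yml is running; replace <namespace> with the cluster's namespace:

# Same query check_balancer runs via run_mongosh, issued against the config replica set.
kubectl exec deploy/psmdb-client -- mongosh --quiet \
    "mongodb+srv://clusterAdmin:clusterAdmin123456@some-name-cfg.<namespace>.svc.cluster.local/admin?ssl=false&replicaSet=cfg" \
    --eval 'sh.getBalancerState()'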

e2e-tests/conf/client-70.yml

New file — 23 additions, 0 deletions

@@ -0,0 +1,23 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: psmdb-client
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      name: psmdb-client
+  template:
+    metadata:
+      labels:
+        name: psmdb-client
+    spec:
+      terminationGracePeriodSeconds: 10
+      containers:
+        - name: psmdb-client
+          image: percona/percona-server-mongodb:7.0
+          imagePullPolicy: Always
+          command:
+            - sleep
+          args:
+            - "100500"
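The 7.0 client image is presumably what provides mongosh for the new run_mongosh helper, which is why the balancer test switches from client.yml to client-70.yml. A quick sanity check once the Deployment is up (assumes mongosh is present in the image):

# Find the client pod the same way the e2e helpers do, then confirm mongosh is available.
CLIENT_POD=$(kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}')
kubectl exec "$CLIENT_POD" -- mongosh --version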

e2e-tests/functions

17 additions, 3 deletions

@@ -218,12 +218,13 @@ wait_backup_agent() {

 wait_backup() {
     local backup_name=$1
+    local target_state=${2:-"ready"}

     set +o xtrace
     retry=0
-    echo -n $backup_name
+    echo -n "waiting for ${backup_name} to reach ${target_state} state"
     local current_status=
-    until [[ ${current_status} == "ready" ]]; do
+    until [[ ${current_status} == ${target_state} ]]; do
         sleep 1
         echo -n .
         let retry+=1
@@ -345,7 +346,7 @@ wait_restore() {

     set +o xtrace
     retry=0
-    echo -n "waiting psmdb-restore/${backup_name} to reach ${target_state} state"
+    echo -n "waiting psmdb-restore/restore-${backup_name} to reach ${target_state} state"
     local current_state=
     until [[ ${current_state} == ${target_state} ]]; do
         sleep 1
@@ -734,9 +735,22 @@ run_mongo() {
     [[ $uri == *cfg* ]] && replica_set='cfg' || replica_set='rs0'
     kubectl_bin exec ${client_container} -- \
         bash -c "printf '$command\n' | mongo $driver://$uri$suffix/admin?ssl=false\&replicaSet=$replica_set $mongo_flag"
+}
+
+run_mongosh() {
+    local command="$1"
+    local uri="$2"
+    local driver=${3:-"mongodb+srv"}
+    local suffix=${4:-".svc.cluster.local"}
+    local mongo_flag="$5"

+    local client_container=$(kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}')
+    [[ $uri == *cfg* ]] && replica_set='cfg' || replica_set='rs0'
+    kubectl_bin exec ${client_container} -- \
+        bash -c "printf '$command\n' | mongosh --quiet $driver://$uri$suffix/admin?ssl=false\&replicaSet=$replica_set $mongo_flag"
 }

+
 run_mongo_tls() {
     local command="$1"
     local uri="$2"
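Usage follows the older helpers; the balancer test above exercises the new parameters roughly like this, after sourcing e2e-tests/functions (values taken from the diff):

# Wait for a backup to be picked up, then finished, using the new optional target-state argument.
wait_backup "backup-minio-0" "requested"
wait_backup "backup-minio-0" "ready"

# Run a mongosh command against the config replica set through the client pod.
run_mongosh 'sh.getBalancerState()' "clusterAdmin:clusterAdmin123456@${cluster}-cfg.${namespace}"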
New file — 51 additions, 0 deletions

@@ -0,0 +1,51 @@
+apiVersion: psmdb.percona.com/v1
+kind: PerconaServerMongoDB
+metadata:
+  name: some-name
+spec:
+  #platform: openshift
+  image:
+  imagePullPolicy: Always
+  backup:
+    enabled: false
+  sharding:
+    enabled: true
+    configsvrReplSet:
+      size: 3
+      volumeSpec:
+        persistentVolumeClaim:
+          resources:
+            requests:
+              storage: 3Gi
+      expose:
+        enabled: false
+
+    mongos:
+      size: 3
+      configuration: |
+        replication:
+          localPingThresholdMs: 15
+      expose:
+        type: ClusterIP
+
+  replsets:
+    - name: rs0
+      affinity:
+        antiAffinityTopologyKey: none
+      expose:
+        enabled: false
+      resources:
+        limits:
+          cpu: 500m
+          memory: 1G
+        requests:
+          cpu: 100m
+          memory: 0.1G
+      volumeSpec:
+        persistentVolumeClaim:
+          resources:
+            requests:
+              storage: 1Gi
+      size: 3
+  secrets:
+    users: some-users
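The service-per-pod test applies this manifest through apply_cluster, which also injects the image tags. Outside the harness, a minimal sketch would be to fill in spec.image and apply the file directly (the psmdb short name appears elsewhere in this commit; polling .status.state is an assumption about the operator's CR status):

kubectl apply -f sharded.yml
kubectl get psmdb some-name -o jsonpath='{.status.state}'   # expect "ready" once cfg, rs0 and mongos are up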

e2e-tests/service-per-pod/run

47 additions, 2 deletions

@@ -9,7 +9,7 @@ set_debug
 check_cr_config() {
     local cluster="$1"

-    desc "create first PSMDB cluster $cluster"
+    desc "create PSMDB cluster $cluster"
     apply_cluster $test_dir/conf/$cluster.yml

     desc 'check if all 3 Pods started'
@@ -40,8 +40,8 @@ check_cr_config() {
     compare_mongo_cmd "find" "myApp:myPass@$(get_service_ip $cluster-1)" "" ":27017"
     compare_mongo_cmd "find" "myApp:myPass@$(get_service_ip $cluster-2)" "" ":27017"

-    desc 'add service-per-pod lebel and annotation'
     if [[ $cluster == "node-port-rs0" ]]; then
+        desc 'add service-per-pod label and annotation'
         old_node_port=$(kubectl_bin get svc node-port-rs0-0 -o 'jsonpath={.spec.ports[0].nodePort}')
         kubectl_bin patch psmdb node-port --type=json --patch '[
             {
@@ -74,6 +74,36 @@ check_cr_config() {
         -f $test_dir/conf/$cluster.yml
 }

+check_service() {
+    state=$1
+    svc_name=$2
+    if [ $state = "present" ]; then
+        echo -n "check that $svc_name was created"
+        local timeout=0
+        until kubectl_bin get service/$svc_name -o 'jsonpath={.spec.type}' 2>&1 | grep -vq NotFound; do
+            sleep 1
+            timeout=$((timeout + 1))
+            echo -n '.'
+            if [[ ${timeout} -gt 900 ]]; then
+                echo "Waiting timeout has been reached. Service $svc_name is not present. Exiting..."
+                exit 1
+            fi
+        done
+        echo ".OK"
+    elif [ $state = "removed" ]; then
+        echo -n "check that $svc_name was removed"
+        if [[ -z $(kubectl_bin get service/$svc_name -o 'jsonpath={.spec.type}' 2>&1 | grep NotFound) ]]; then
+            echo "$svc_name was not removed."
+            exit 1
+        else
+            echo ".OK"
+        fi
+    else
+        echo "unknown state $state"
+    fi
+}
+
+
 main() {
     create_infra $namespace
     deploy_cert_manager
@@ -92,6 +122,21 @@ main() {
     desc 'check NodePort'
     check_cr_config "node-port-rs0"

+    desc 'check Mongos in sharded cluster'
+    local cluster=some-name
+    apply_cluster "$test_dir/conf/sharded.yml"
+    wait_for_running $cluster-rs0 3
+    wait_for_running $cluster-cfg 3 "false"
+    wait_for_running $cluster-mongos 3
+
+    desc 'enabling servicePerPod for mongos'
+    kubectl patch psmdb some-name --type=merge -p '{"spec":{"sharding":{"mongos":{"expose":{"servicePerPod":true}}}}}'
+    wait_for_running $cluster-mongos 3
+    check_service present $cluster-mongos-0
+    check_service present $cluster-mongos-1
+    check_service present $cluster-mongos-2
+    check_service removed $cluster-mongos
+
     destroy $namespace

     desc 'test passed'
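Once servicePerPod is enabled, the check_service calls above amount to verifying that one Service exists per mongos Pod and that the shared mongos Service is gone. A manual equivalent, using the service names from the test (cluster name some-name):

kubectl get service some-name-mongos-0 some-name-mongos-1 some-name-mongos-2
kubectl get service some-name-mongos 2>&1 | grep NotFound   # the shared mongos Service should be removed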
