
Commit 9bbb75f

K8SPSMDB-886 - Fix tests (#1332)

* Fix tests
* Fix default-cr test
* Upgrade jq to 1.7 and fix some tests
* Return older jq version and fix demand-backup-physical-sharded test
* Fix demand-backup-physical test on openshift

1 parent 0b10e0e · commit 9bbb75f

File tree: 10 files changed, +267 −34 lines


e2e-tests/custom-replset-name/run

Lines changed: 3 additions & 1 deletion
@@ -15,7 +15,9 @@ kubectl_bin apply -f $conf_dir/secrets.yml -f $conf_dir/client.yml -f $conf_dir/
 cluster="some-name"
 
 desc 'create first PSMDB cluster'
-apply_cluster $test_dir/conf/${cluster}.yml
+sc=$(kubectl get storageclass -o custom-columns=NAME:.metadata.name --no-headers=true | tail -n1)
+yq eval '(.. | select(has("storageClassName")).storageClassName) = "'$sc'"' $test_dir/conf/$cluster.yml >${tmp_dir}/$cluster.yml
+apply_cluster ${tmp_dir}/$cluster.yml
 wait_cluster_consistency $cluster
 
 desc 'write data, read from all'
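The yq expression added above rewrites every storageClassName anywhere in the CR manifest to whatever storage class the target cluster actually provides, so the test no longer depends on a hard-coded class. A rough illustration of the substitution against a made-up snippet (not the test's real manifest; "local-path" is just an example class):

    $ cat snippet.yml
    volumeSpec:
      persistentVolumeClaim:
        storageClassName: standard
    $ yq eval '(.. | select(has("storageClassName")).storageClassName) = "local-path"' snippet.yml
    volumeSpec:
      persistentVolumeClaim:
        storageClassName: local-path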

e2e-tests/default-cr/compare/statefulset_minimal-cluster-cfg-oc.yml

Lines changed: 0 additions & 6 deletions
@@ -87,12 +87,6 @@ spec:
                 - /opt/percona/mongodb-healthcheck
                 - k8s
                 - liveness
-                - --ssl
-                - --sslInsecure
-                - --sslCAFile
-                - /etc/mongodb-ssl/ca.crt
-                - --sslPEMKeyFile
-                - /tmp/tls.pem
                 - --startupDelaySeconds
                 - "7200"
             failureThreshold: 4

e2e-tests/default-cr/compare/statefulset_minimal-cluster-cfg.yml

Lines changed: 0 additions & 6 deletions
@@ -87,12 +87,6 @@ spec:
                 - /opt/percona/mongodb-healthcheck
                 - k8s
                 - liveness
-                - --ssl
-                - --sslInsecure
-                - --sslCAFile
-                - /etc/mongodb-ssl/ca.crt
-                - --sslPEMKeyFile
-                - /tmp/tls.pem
                 - --startupDelaySeconds
                 - "7200"
             failureThreshold: 4

e2e-tests/default-cr/compare/statefulset_minimal-cluster-rs0-oc.yml

Lines changed: 0 additions & 6 deletions
@@ -87,12 +87,6 @@ spec:
                 - /opt/percona/mongodb-healthcheck
                 - k8s
                 - liveness
-                - --ssl
-                - --sslInsecure
-                - --sslCAFile
-                - /etc/mongodb-ssl/ca.crt
-                - --sslPEMKeyFile
-                - /tmp/tls.pem
                 - --startupDelaySeconds
                 - "7200"
             failureThreshold: 4

e2e-tests/default-cr/compare/statefulset_minimal-cluster-rs0.yml

Lines changed: 0 additions & 6 deletions
@@ -87,12 +87,6 @@ spec:
                 - /opt/percona/mongodb-healthcheck
                 - k8s
                 - liveness
-                - --ssl
-                - --sslInsecure
-                - --sslCAFile
-                - /etc/mongodb-ssl/ca.crt
-                - --sslPEMKeyFile
-                - /tmp/tls.pem
                 - --startupDelaySeconds
                 - "7200"
             failureThreshold: 4

e2e-tests/default-cr/run

Lines changed: 2 additions & 2 deletions
@@ -39,7 +39,7 @@ function start_cluster() {
     local cluster_name=$1
 
     kubectl_bin patch psmdb ${cluster_name} --type json -p='[{"op":"add","path":"/spec/pause","value":false}]'
-    wait_cluster_consistency ${cluster_name}
+    wait_cluster_consistency ${cluster_name} 42
 }
 
 function main() {
@@ -70,7 +70,7 @@ function main() {
         | kubectl_bin apply -f -
 
     desc 'check if all 3 Pods started'
-    wait_cluster_consistency $cluster
+    wait_cluster_consistency $cluster 42
 
     desc 'check if service and statefulset created with expected config'
     compare_kubectl statefulset/$cluster-rs0
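For reference, the 42 passed here is the new optional retry budget that wait_cluster_consistency accepts after this commit (see the e2e-tests/functions hunk below); callers that pass nothing keep the old limit of 32 retries via plain bash parameter defaulting:

    local wait_time=${2:-32}   # use the caller-supplied retry budget, or fall back to 32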

e2e-tests/demand-backup-physical-sharded/run

Lines changed: 3 additions & 3 deletions
@@ -29,13 +29,13 @@ run_recovery_check() {
     local backup_name=$1
     local compare_suffix=${2:-"_restore"}
 
-    wait_restore "${backup_name}" "${cluster}" "requested" "0"
+    wait_restore "${backup_name}" "${cluster}" "requested" "0" "420"
     echo
 
     compare_kubectl "statefulset/${cluster}-rs0" ${compare_suffix}
 
     # we don't wait for cluster readiness here because the annotation gets removed then
-    wait_restore "${backup_name}" "${cluster}" "ready" "0"
+    wait_restore "${backup_name}" "${cluster}" "ready" "0" "900"
     kubectl_bin get psmdb ${cluster} -o yaml
     if [ $(kubectl_bin get psmdb ${cluster} -o yaml | yq '.metadata.annotations."percona.com/resync-pbm"') == null ]; then
         echo "psmdb/${cluster} should be annotated with percona.com/resync-pbm after a physical restore"
@@ -78,7 +78,7 @@ wait_for_running ${cluster}-mongos 3
 wait_cluster_consistency ${cluster}
 
 lbEndpoint=$(kubectl_bin get svc $cluster-mongos -o=jsonpath='{.status}' \
-    | jq -r 'select(.loadBalancer != null and .loadBalancer.ingress != null and .loadBalancer.ingress != []) | .loadBalancer.ingress[0].ip')
+    | jq -r 'select(.loadBalancer != null and .loadBalancer.ingress != null and .loadBalancer.ingress != []) | .loadBalancer.ingress[0][]')
 if [ -z $lbEndpoint ]; then
     echo "mongos service not exported correctly"
     exit 1
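The jq change makes the mongos endpoint lookup work when the load balancer publishes a hostname rather than an IP: .loadBalancer.ingress[0].ip yields null in that case, while .loadBalancer.ingress[0][] emits whatever single field the first ingress entry carries. A quick check against a hand-written status object (the hostname is made up):

    $ echo '{"loadBalancer":{"ingress":[{"hostname":"abc.example.elb.amazonaws.com"}]}}' | jq -r '.loadBalancer.ingress[0].ip'
    null
    $ echo '{"loadBalancer":{"ingress":[{"hostname":"abc.example.elb.amazonaws.com"}]}}' | jq -r '.loadBalancer.ingress[0][]'
    abc.example.elb.amazonaws.com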

Lines changed: 253 additions & 0 deletions
@@ -0,0 +1,253 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
  annotations:
    percona.com/restore-in-progress: "true"
  generation: 3
  labels:
    app.kubernetes.io/component: mongod
    app.kubernetes.io/instance: some-name
    app.kubernetes.io/managed-by: percona-server-mongodb-operator
    app.kubernetes.io/name: percona-server-mongodb
    app.kubernetes.io/part-of: percona-server-mongodb
    app.kubernetes.io/replset: rs0
  name: some-name-rs0
  ownerReferences:
    - controller: true
      kind: PerconaServerMongoDB
      name: some-name
spec:
  podManagementPolicy: OrderedReady
  replicas: 4
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      app.kubernetes.io/component: mongod
      app.kubernetes.io/instance: some-name
      app.kubernetes.io/managed-by: percona-server-mongodb-operator
      app.kubernetes.io/name: percona-server-mongodb
      app.kubernetes.io/part-of: percona-server-mongodb
      app.kubernetes.io/replset: rs0
  serviceName: some-name-rs0
  template:
    metadata:
      annotations: {}
      labels:
        app.kubernetes.io/component: mongod
        app.kubernetes.io/instance: some-name
        app.kubernetes.io/managed-by: percona-server-mongodb-operator
        app.kubernetes.io/name: percona-server-mongodb
        app.kubernetes.io/part-of: percona-server-mongodb
        app.kubernetes.io/replset: rs0
    spec:
      containers:
        - args:
            - --bind_ip_all
            - --auth
            - --dbpath=/data/db
            - --port=27017
            - --replSet=rs0
            - --storageEngine=wiredTiger
            - --relaxPermChecks
            - --sslAllowInvalidCertificates
            - --clusterAuthMode=x509
            - --enableEncryption
            - --encryptionKeyFile=/etc/mongodb-encryption/encryption-key
            - --wiredTigerCacheSizeGB=0.25
            - --wiredTigerIndexPrefixCompression=true
            - --config=/etc/mongodb-config/mongod.conf
          command:
            - /opt/percona/physical-restore-ps-entry.sh
          env:
            - name: SERVICE_NAME
              value: some-name
            - name: MONGODB_PORT
              value: "27017"
            - name: MONGODB_REPLSET
              value: rs0
            - name: PBM_AGENT_MONGODB_USERNAME
              valueFrom:
                secretKeyRef:
                  key: MONGODB_BACKUP_USER
                  name: some-users
            - name: PBM_AGENT_MONGODB_PASSWORD
              valueFrom:
                secretKeyRef:
                  key: MONGODB_BACKUP_PASSWORD
                  name: some-users
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.name
            - name: PBM_MONGODB_URI
              value: mongodb://$(PBM_AGENT_MONGODB_USERNAME):$(PBM_AGENT_MONGODB_PASSWORD)@$(POD_NAME)
          envFrom:
            - secretRef:
                name: internal-some-name-users
                optional: false
          imagePullPolicy: Always
          livenessProbe:
            exec:
              command:
                - /opt/percona/mongodb-healthcheck
                - k8s
                - liveness
                - --ssl
                - --sslInsecure
                - --sslCAFile
                - /etc/mongodb-ssl/ca.crt
                - --sslPEMKeyFile
                - /tmp/tls.pem
                - --startupDelaySeconds
                - "7200"
            failureThreshold: 4
            initialDelaySeconds: 60
            periodSeconds: 30
            successThreshold: 1
            timeoutSeconds: 10
          name: mongod
          ports:
            - containerPort: 27017
              name: mongodb
              protocol: TCP
          readinessProbe:
            exec:
              command:
                - /opt/percona/mongodb-healthcheck
                - k8s
                - readiness
                - --component
                - mongod
            failureThreshold: 8
            initialDelaySeconds: 10
            periodSeconds: 3
            successThreshold: 1
            timeoutSeconds: 2
          resources:
            limits:
              cpu: 500m
              memory: 1G
            requests:
              cpu: 100m
              memory: 100M
          securityContext:
            runAsNonRoot: true
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
          volumeMounts:
            - mountPath: /data/db
              name: mongod-data
            - mountPath: /etc/mongodb-secrets
              name: some-name-mongodb-keyfile
              readOnly: true
            - mountPath: /etc/mongodb-ssl
              name: ssl
              readOnly: true
            - mountPath: /etc/mongodb-ssl-internal
              name: ssl-internal
              readOnly: true
            - mountPath: /etc/mongodb-config
              name: config
            - mountPath: /opt/percona
              name: bin
            - mountPath: /etc/mongodb-encryption
              name: some-name-mongodb-encryption-key
              readOnly: true
            - mountPath: /etc/users-secret
              name: users-secret-file
            - mountPath: /etc/pbm/
              name: pbm-config
              readOnly: true
          workingDir: /data/db
      dnsPolicy: ClusterFirst
      initContainers:
        - command:
            - /init-entrypoint.sh
          imagePullPolicy: Always
          name: mongo-init
          resources:
            limits:
              cpu: 500m
              memory: 1G
            requests:
              cpu: 100m
              memory: 100M
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
          volumeMounts:
            - mountPath: /data/db
              name: mongod-data
            - mountPath: /opt/percona
              name: bin
        - command:
            - bash
            - -c
            - install -D /usr/bin/pbm /opt/percona/pbm && install -D /usr/bin/pbm-agent /opt/percona/pbm-agent
          imagePullPolicy: Always
          name: pbm-init
          resources: {}
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
          volumeMounts:
            - mountPath: /data/db
              name: mongod-data
            - mountPath: /opt/percona
              name: bin
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      serviceAccount: default
      serviceAccountName: default
      terminationGracePeriodSeconds: 60
      volumes:
        - name: some-name-mongodb-keyfile
          secret:
            defaultMode: 288
            optional: false
            secretName: some-name-mongodb-keyfile
        - emptyDir: {}
          name: bin
        - configMap:
            defaultMode: 420
            name: some-name-rs0-mongod
            optional: true
          name: config
        - name: some-name-mongodb-encryption-key
          secret:
            defaultMode: 288
            optional: false
            secretName: some-name-mongodb-encryption-key
        - name: ssl
          secret:
            defaultMode: 288
            optional: false
            secretName: some-name-ssl
        - name: ssl-internal
          secret:
            defaultMode: 288
            optional: true
            secretName: some-name-ssl-internal
        - name: users-secret-file
          secret:
            defaultMode: 420
            secretName: internal-some-name-users
        - name: pbm-config
          secret:
            defaultMode: 420
            secretName: pbm-config
  updateStrategy:
    rollingUpdate:
      partition: 0
    type: RollingUpdate
  volumeClaimTemplates:
    - metadata:
        name: mongod-data
      spec:
        accessModes:
          - ReadWriteOnce
        resources:
          requests:
            storage: 3Gi
status:
  phase: Pending
e2e-tests/functions

Lines changed: 4 additions & 2 deletions
@@ -317,6 +317,7 @@ wait_restore() {
     local cluster_name=$2
     local target_state=${3:-"ready"}
     local wait_cluster_consistency=${4:-1}
+    local wait_time=${5:-780}
 
     set +o xtrace
     retry=0
@@ -327,7 +328,7 @@
         echo -n .
         let retry+=1
         current_state=$(kubectl_bin get psmdb-restore restore-$backup_name -o jsonpath='{.status.state}')
-        if [[ $retry -ge 780 || ${current_state} == 'error' ]]; then
+        if [[ $retry -ge $wait_time || ${current_state} == 'error' ]]; then
             kubectl_bin logs ${OPERATOR_NS:+-n $OPERATOR_NS} $(get_operator_pod) \
                 | grep -v 'level=info' \
                 | grep -v 'level=debug' \
@@ -1129,13 +1130,14 @@ check_mongo_auth() {
 
 wait_cluster_consistency() {
     local cluster_name=$1
+    local wait_time=${2:-32}
 
     retry=0
     sleep 7 # wait for two reconcile loops ;) 3 sec x 2 times + 1 sec = 7 seconds
     echo -n 'waiting for cluster readyness'
     until [[ "$(kubectl_bin get psmdb "${cluster_name}" -o jsonpath='{.status.state}')" == "ready" ]]; do
         let retry+=1
-        if [ $retry -ge 32 ]; then
+        if [ $retry -ge $wait_time ]; then
             echo max retry count $retry reached. something went wrong with operator or kubernetes cluster
             exit 1
         fi