2 changes: 1 addition & 1 deletion e2e-tests/data-at-rest-encryption/run
@@ -57,7 +57,7 @@ sleep 5

desc "check backup and restore -- minio"
backup_dest_minio=$(get_backup_dest "$backup_name_minio")
-kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- \
+retry 3 5 kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- \
/usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 \
/usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls s3://${backup_dest_minio}/rs0/ \
| grep myApp.test.gz
9 changes: 5 additions & 4 deletions e2e-tests/demand-backup-physical-sharded/run
@@ -29,23 +29,24 @@ run_recovery_check() {
local backup_name=$1
local compare_suffix=${2:-"_restore"}

wait_restore "${backup_name}" "${cluster}" "requested" "0" "1200"
wait_restore "${backup_name}" "${cluster}" "requested" "0" "3000"
echo

compare_kubectl "statefulset/${cluster}-rs0" ${compare_suffix}

	# we don't wait for cluster readiness here because the annotation gets removed by then
wait_restore "${backup_name}" "${cluster}" "ready" "0" "1800"
wait_restore "${backup_name}" "${cluster}" "ready" "0" "3000"
kubectl_bin get psmdb ${cluster} -o yaml
	if [[ $(kubectl_bin get psmdb ${cluster} -o yaml | yq '.metadata.annotations."percona.com/resync-pbm"') == "null" ]]; then
echo "psmdb/${cluster} should be annotated with percona.com/resync-pbm after a physical restore"
exit 1
fi
echo

wait_cluster_consistency ${cluster} 42
wait_for_pbm_operations ${cluster}

compare_mongos_cmd "find" "myApp:myPass@${cluster}-mongos.${namespace}" "-sharded"
echo
set -o xtrace
}

check_exported_mongos_service_endpoint() {
9 changes: 3 additions & 6 deletions e2e-tests/demand-backup-physical/run
@@ -29,29 +29,26 @@ run_recovery_check() {
local backup_name=$1
local compare_suffix=${2:-"_restore"}

wait_restore "${backup_name}" "${cluster}" "requested" "0" "1200"
wait_restore "${backup_name}" "${cluster}" "requested" "0" "3000"
echo

compare_kubectl "statefulset/${cluster}-rs0" ${compare_suffix}

	# we don't wait for cluster readiness here because the annotation gets removed by then
wait_restore "${backup_name}" "${cluster}" "ready" "0" "1800"
wait_restore "${backup_name}" "${cluster}" "ready" "0" "3000"

kubectl_bin get psmdb ${cluster} -o yaml
	if [[ $(kubectl_bin get psmdb ${cluster} -o yaml | yq '.metadata.annotations."percona.com/resync-pbm"') == "null" ]]; then
echo "psmdb/${cluster} should be annotated with percona.com/resync-pbm after a physical restore"
exit 1
fi
echo

wait_cluster_consistency ${cluster}
wait_for_pbm_operations ${cluster}

compare_mongo_cmd "find" "myApp:myPass@${cluster}-rs0-0.${cluster}-rs0.${namespace}"
compare_mongo_cmd "find" "myApp:myPass@${cluster}-rs0-1.${cluster}-rs0.${namespace}"
compare_mongo_cmd "find" "myApp:myPass@${cluster}-rs0-2.${cluster}-rs0.${namespace}"

echo
set -o xtrace
}

create_infra "${namespace}"
18 changes: 18 additions & 0 deletions e2e-tests/functions
@@ -239,6 +239,24 @@ wait_backup() {
set_debug
}

wait_for_pbm_operations() {
local cluster=$1

set +o xtrace
echo -n "waiting for PBM operation to finish"
retry=0
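	# poll PBM status until no backup/restore operation reports a running opID (up to 540 retries, ~45 minutes)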
	until [[ $(kubectl_bin exec ${cluster}-rs0-0 -c backup-agent -- pbm status -o json -s running | jq -r .running.opID) == null ]]; do
		if [ $retry -ge 540 ]; then
			echo "max retry count $retry reached. Something went wrong with PBM operations"
			exit 1
		fi
		retry=$((retry + 1))
		echo -n .
		sleep 5
	done
echo
set_debug
}

run_restore() {
local backup_name=$1

5 changes: 5 additions & 0 deletions e2e-tests/pvc-resize/run
@@ -157,6 +157,11 @@ if [[ $EKS == 1 || -n ${OPENSHIFT} ]]; then
else
spinup_psmdb "${cluster}-rs0" "$test_dir/conf/$cluster.yml"
fi
echo "Enabling PVC resize after recreating PSMDB cluster ${cluster} "
kubectl_bin patch psmdb "${cluster}" --type=json -p='[{"op": "add", "path": "/spec/enableVolumeExpansion", "value":true }]'
sleep 10

wait_cluster_consistency "$cluster"
fi

desc 'create resourcequota'
@@ -0,0 +1,217 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
annotations: {}
generation: 1
labels:
app.kubernetes.io/component: mongod
app.kubernetes.io/instance: mydb
app.kubernetes.io/managed-by: percona-server-mongodb-operator
app.kubernetes.io/name: percona-server-mongodb
app.kubernetes.io/part-of: percona-server-mongodb
app.kubernetes.io/replset: rs0
name: mydb-rs0
ownerReferences:
- controller: true
kind: PerconaServerMongoDB
name: mydb
spec:
podManagementPolicy: OrderedReady
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
app.kubernetes.io/component: mongod
app.kubernetes.io/instance: mydb
app.kubernetes.io/managed-by: percona-server-mongodb-operator
app.kubernetes.io/name: percona-server-mongodb
app.kubernetes.io/part-of: percona-server-mongodb
app.kubernetes.io/replset: rs0
serviceName: mydb-rs0
template:
metadata:
annotations: {}
labels:
app.kubernetes.io/component: mongod
app.kubernetes.io/instance: mydb
app.kubernetes.io/managed-by: percona-server-mongodb-operator
app.kubernetes.io/name: percona-server-mongodb
app.kubernetes.io/part-of: percona-server-mongodb
app.kubernetes.io/replset: rs0
spec:
containers:
- args:
- --bind_ip_all
- --auth
- --dbpath=/data/db
- --port=27017
- --replSet=rs0
- --storageEngine=wiredTiger
- --relaxPermChecks
- --sslAllowInvalidCertificates
- --clusterAuthMode=keyFile
- --keyFile=/etc/mongodb-secrets/mongodb-key
- --tlsMode=allowTLS
- --enableEncryption
- --encryptionKeyFile=/etc/mongodb-encryption/encryption-key
- --wiredTigerCacheSizeGB=0.25
- --wiredTigerIndexPrefixCompression=true
- --config=/etc/mongodb-config/mongod.conf
- --quiet
command:
- /opt/percona/ps-entry.sh
env:
- name: SERVICE_NAME
value: mydb
- name: MONGODB_PORT
value: "27017"
- name: MONGODB_REPLSET
value: rs0
envFrom:
- secretRef:
name: internal-mydb-users
optional: false
imagePullPolicy: Always
livenessProbe:
exec:
command:
- /opt/percona/mongodb-healthcheck
- k8s
- liveness
- --ssl
- --sslInsecure
- --sslCAFile
- /etc/mongodb-ssl/ca.crt
- --sslPEMKeyFile
- /tmp/tls.pem
- --startupDelaySeconds
- "7200"
failureThreshold: 4
initialDelaySeconds: 60
periodSeconds: 30
successThreshold: 1
timeoutSeconds: 10
name: mongod
ports:
- containerPort: 27017
name: mongodb
protocol: TCP
readinessProbe:
exec:
command:
- /opt/percona/mongodb-healthcheck
- k8s
- readiness
- --component
- mongod
failureThreshold: 8
initialDelaySeconds: 10
periodSeconds: 3
successThreshold: 1
timeoutSeconds: 2
resources:
limits:
cpu: 300m
memory: 500M
requests:
cpu: 300m
memory: 500M
securityContext:
runAsNonRoot: true
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /data/db
name: mongod-data
- mountPath: /etc/mongodb-secrets
name: mydb-custom-mongodb-keyfile
readOnly: true
- mountPath: /etc/mongodb-ssl
name: ssl
readOnly: true
- mountPath: /etc/mongodb-ssl-internal
name: ssl-internal
readOnly: true
- mountPath: /etc/mongodb-config
name: config
- mountPath: /opt/percona
name: bin
- mountPath: /etc/mongodb-encryption
name: mydb-custom-encryption-key
readOnly: true
- mountPath: /etc/users-secret
name: users-secret-file
workingDir: /data/db
dnsPolicy: ClusterFirst
initContainers:
- command:
- /init-entrypoint.sh
imagePullPolicy: Always
name: mongo-init
resources:
limits:
cpu: 300m
memory: 500M
requests:
cpu: 300m
memory: 500M
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /data/db
name: mongod-data
- mountPath: /opt/percona
name: bin
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
serviceAccount: default
serviceAccountName: default
terminationGracePeriodSeconds: 60
volumes:
- name: mydb-custom-mongodb-keyfile
secret:
defaultMode: 288
optional: false
secretName: mydb-custom-mongodb-keyfile
- emptyDir: {}
name: bin
- configMap:
defaultMode: 420
name: mydb-rs0-mongod
optional: true
name: config
- name: mydb-custom-encryption-key
secret:
defaultMode: 288
optional: false
secretName: mydb-custom-encryption-key
- name: ssl
secret:
defaultMode: 288
optional: false
secretName: mydb-custom-ssl
- name: ssl-internal
secret:
defaultMode: 288
optional: true
secretName: mydb-custom-ssl-internal
- name: users-secret-file
secret:
defaultMode: 420
secretName: internal-mydb-users
updateStrategy:
rollingUpdate:
partition: 0
type: RollingUpdate
volumeClaimTemplates:
- metadata:
name: mongod-data
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 3Gi
status:
phase: Pending
4 changes: 2 additions & 2 deletions e2e-tests/serviceless-external-nodes/run
@@ -22,7 +22,7 @@ apply_cluster "$test_dir/conf/main.yml"
wait_for_running "$cluster-rs0" 1
compare_kubectl statefulset/mydb-rs0

-secrets_count=$(kubectl_bin get secret -o yaml | yq '.items | length')
+secrets_count=$(kubectl_bin get secret -o json | jq --arg pattern "$cluster" '[.items[] | select(.metadata.name | test($pattern))] | length')
if [[ $secrets_count != 6 ]]; then
echo "It's expected to have 6 secrets. Currently have $secrets_count"
exit 1
@@ -41,7 +41,7 @@ apply_cluster "$test_dir/conf/external.yml"
wait_pod ${cluster}-rs0-0
wait_pod ${cluster}-rs0-1

-secrets_count=$(kubectl_bin get secret -o yaml | yq '.items | length')
+secrets_count=$(kubectl_bin get secret -o json | jq --arg pattern "$cluster" '[.items[] | select(.metadata.name | test($pattern))] | length')
if [[ $secrets_count != 6 ]]; then
echo "It's expected to have 6 secrets. Currently have $secrets_count"
exit 1