@@ -54,34 +54,46 @@ wait_for_delete() {
 	done
 }

+apply_crd() {
+	kubectl -n "${OPERATOR_NS:-$NAMESPACE}" apply --server-side --force-conflicts -f "${DEPLOY_DIR}/crd.yaml"
+}
+
+apply_rbac() {
+	local rbac_file
+
+	if [ -n "$OPERATOR_NS" ]; then
+		rbac_file="${DEPLOY_DIR}/cw-rbac.yaml"
+	else
+		rbac_file="${DEPLOY_DIR}/rbac.yaml"
+	fi
+
+	kubectl -n "${OPERATOR_NS:-$NAMESPACE}" apply -f "${rbac_file}"
+}
+
 deploy_operator() {
 	destroy_operator

 	if [[ $OPERATOR_NS ]]; then
 		create_namespace "${OPERATOR_NS}"
 	fi

-	kubectl -n "${OPERATOR_NS:-$NAMESPACE}" apply --server-side --force-conflicts -f "${DEPLOY_DIR}/crd.yaml"
+	apply_crd
+	apply_rbac
+
+	local operator_file

 	if [ -n "$OPERATOR_NS" ]; then
-		kubectl -n "${OPERATOR_NS:-$NAMESPACE}" apply -f "${DEPLOY_DIR}/cw-rbac.yaml"
-
-		yq eval \
-			"$(printf 'select(documentIndex==1).spec.template.spec.containers[0].image="%s"' "${IMAGE}")" \
-			"${DEPLOY_DIR}/cw-operator.yaml" \
-			| yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="DISABLE_TELEMETRY").value) = "true"' \
-			| yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="LOG_LEVEL").value) = "DEBUG"' \
-			| kubectl -n "${OPERATOR_NS:-$NAMESPACE}" apply -f -
+		operator_file="${DEPLOY_DIR}/cw-operator.yaml"
 	else
-		kubectl -n "${OPERATOR_NS:-$NAMESPACE}" apply -f "${DEPLOY_DIR}/rbac.yaml"
-
-		yq eval \
-			"$(printf 'select(documentIndex==1).spec.template.spec.containers[0].image="%s"' "${IMAGE}")" \
-			"${DEPLOY_DIR}/operator.yaml" \
-			| yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="DISABLE_TELEMETRY").value) = "true"' \
-			| yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="LOG_LEVEL").value) = "DEBUG"' \
-			| kubectl -n "${OPERATOR_NS:-$NAMESPACE}" apply -f -
+		operator_file="${DEPLOY_DIR}/operator.yaml"
 	fi
+
+	yq eval \
+		"$(printf 'select(documentIndex==1).spec.template.spec.containers[0].image="%s"' "${IMAGE}")" \
+		"${operator_file}" \
+		| yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="DISABLE_TELEMETRY").value) = "true"' \
+		| yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="LOG_LEVEL").value) = "DEBUG"' \
+		| kubectl -n "${OPERATOR_NS:-$NAMESPACE}" apply -f -
 }

 upgrade_operator_image() {
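
Both new helpers and the final apply target the same namespace via ${OPERATOR_NS:-$NAMESPACE}. A minimal sketch of how that fallback behaves, with hypothetical variable values (not taken from the test suite):

    # when OPERATOR_NS is unset or empty, kubectl targets the per-test namespace
    NAMESPACE="kuttl-test-example"
    OPERATOR_NS=""
    echo "${OPERATOR_NS:-$NAMESPACE}" # -> kuttl-test-example

    # for a cluster-wide deployment OPERATOR_NS wins, and cw-rbac.yaml / cw-operator.yaml are selected
    OPERATOR_NS="ps-operator"
    echo "${OPERATOR_NS:-$NAMESPACE}" # -> ps-operator
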
@@ -136,8 +148,8 @@ deploy_tls_cluster_secrets() {
 }

 deploy_client() {
-	yq eval "$(printf '.spec.containers[0].image="%s"' "${IMAGE_MYSQL}")" "${TESTS_CONFIG_DIR}/client.yaml" | \
-		kubectl -n "${NAMESPACE}" apply -f -
+	yq eval "$(printf '.spec.containers[0].image="%s"' "${IMAGE_MYSQL}")" "${TESTS_CONFIG_DIR}/client.yaml" \
+		| kubectl -n "${NAMESPACE}" apply -f -
 }

 apply_s3_storage_secrets() {
@@ -383,8 +395,9 @@ get_cr() {
 	local image_toolkit=${6:-${IMAGE_TOOLKIT}}
 	local image_haproxy=${7:-${IMAGE_HAPROXY}}
 	local image_pmm_client=${8:-${IMAGE_PMM_CLIENT}}
+	local cr_file=${9:-${DEPLOY_DIR}/cr.yaml}

-	yq eval "$(printf '.metadata.name="%s"' "${test_name}${name_suffix:+-$name_suffix}")" "${DEPLOY_DIR}/cr.yaml" \
+	yq eval "$(printf '.metadata.name="%s"' "${test_name}${name_suffix:+-$name_suffix}")" ${cr_file} \
 		| yq eval "$(printf '.spec.initImage="%s"' "${IMAGE}")" - \
 		| yq eval '.spec.secretsName="test-secrets"' - \
 		| yq eval '.spec.sslSecretName="test-ssl"' - \
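
The new optional ninth parameter keeps the old behavior for existing callers and lets a test render a different base manifest. Hypothetical invocations, sketched under the assumption that the first argument is the name suffix and arguments two through eight are the image overrides shown above (the second call mirrors the one added to get_cr_with_latest_versions_in_vs below):

    # default: renders ${DEPLOY_DIR}/cr.yaml, exactly as before
    get_cr "" "${IMAGE_MYSQL}" "${IMAGE_BACKUP}" "${IMAGE_ORCHESTRATOR}" "${IMAGE_ROUTER}" "${IMAGE_TOOLKIT}" "${IMAGE_HAPROXY}" "${IMAGE_PMM_CLIENT}"

    # explicit ninth argument: renders a previously downloaded CR instead
    get_cr "" "${IMAGE_MYSQL}" "${IMAGE_BACKUP}" "${IMAGE_ORCHESTRATOR}" "${IMAGE_ROUTER}" "${IMAGE_TOOLKIT}" "${IMAGE_HAPROXY}" "${IMAGE_PMM_CLIENT}" "${TEMP_DIR}/cr.yaml"
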
@@ -461,11 +474,11 @@ get_innodb_cluster_status() {
 wait_until_innodb_ok() {
 	local uri="$1"

-    local retry=0
-    until [[ $(get_innodb_cluster_status ${uri} | jq -r .defaultReplicaSet.status) == "OK" ]]; do
-        sleep 5
-        retry=$((retry + 1))
-    done
+	local retry=0
+	until [[ $(get_innodb_cluster_status ${uri} | jq -r .defaultReplicaSet.status) == "OK" ]]; do
+		sleep 5
+		retry=$((retry + 1))
+	done
 }

 run_curl() {
run_curl () {
@@ -477,8 +490,7 @@ get_innodb_cluster_name() {
477
490
}
478
491
479
492
get_mysqlsh_uri_for_pod () {
480
- local pod=$1
481
-
493
+ local pod=$1
482
494
483
495
echo " root:root_password@${pod} .$( get_cluster_name) -mysql.${NAMESPACE} "
484
496
}
@@ -673,7 +685,7 @@ wait_cluster_consistency_gr() {

 wait_pod() {
 	local pod=$1
-    local ns=${2:-${NAMESPACE}}
+	local ns=${2:-${NAMESPACE}}

 	set +o xtrace
 	retry=0
@@ -952,120 +964,120 @@ network_loss() {
 }

 wait_until_chaos_applied() {
-    local chaos_type=$1
-    local chaos_name=$2
-
-    local resource
-    case ${chaos_type} in
-        "kill" | "failure" | "full-cluster-crash")
-            resource=podchaos/${chaos_name}
-            ;;
-        "network")
-            resource=networkchaos/${chaos_name}
-            ;;
-    esac
-
-    local retry=0
-    until [[ ${retry} == 30 ]]; do
-        sleep 10
-        retry=$((retry + 1))
-
-        succeeded=$(kubectl -n ${NAMESPACE} get ${resource} -o yaml \
-            | yq '.status.experiment.containerRecords[].events[]
+	local chaos_type=$1
+	local chaos_name=$2
+
+	local resource
+	case ${chaos_type} in
+		"kill" | "failure" | "full-cluster-crash")
+			resource=podchaos/${chaos_name}
+			;;
+		"network")
+			resource=networkchaos/${chaos_name}
+			;;
+	esac
+
+	local retry=0
+	until [[ ${retry} == 30 ]]; do
+		sleep 10
+		retry=$((retry + 1))
+
+		succeeded=$(kubectl -n ${NAMESPACE} get ${resource} -o yaml \
+			| yq '.status.experiment.containerRecords[].events[]
 			| select(.operation == "Apply" and .type == "Succeeded")')

-        if [[ -n ${succeeded} ]]; then
-            return
-        fi
-    done
+		if [[ -n ${succeeded} ]]; then
+			return
+		fi
+	done

-    echo "Timeout (300s) exceeded while waiting for chaos to be applied"
-    exit 1
+	echo "Timeout (300s) exceeded while waiting for chaos to be applied"
+	exit 1
 }

 wait_until_chaos_recovered() {
-    local chaos_type=$1
-    local chaos_name=$2
-
-    local resource
-    case ${chaos_type} in
-        "kill" | "failure")
-            resource=podchaos/${chaos_name}
-            ;;
-        "network")
-            resource=networkchaos/${chaos_name}
-            ;;
-    esac
-
-    local retry=0
-    until [[ ${retry} == 30 ]]; do
-        sleep 10
-        retry=$((retry + 1))
-
-        succeeded=$(kubectl -n ${NAMESPACE} get ${resource} -o yaml \
-            | yq '.status.experiment.containerRecords[].events[]
+	local chaos_type=$1
+	local chaos_name=$2
+
+	local resource
+	case ${chaos_type} in
+		"kill" | "failure")
+			resource=podchaos/${chaos_name}
+			;;
+		"network")
+			resource=networkchaos/${chaos_name}
+			;;
+	esac
+
+	local retry=0
+	until [[ ${retry} == 30 ]]; do
+		sleep 10
+		retry=$((retry + 1))
+
+		succeeded=$(kubectl -n ${NAMESPACE} get ${resource} -o yaml \
+			| yq '.status.experiment.containerRecords[].events[]
 			| select(.operation == "Recover" and .type == "Succeeded")')

-        if [[ -n ${succeeded} ]]; then
-            return
-        fi
-    done
+		if [[ -n ${succeeded} ]]; then
+			return
+		fi
+	done

-    echo "Timeout (300s) exceeded while waiting for chaos to be recovered"
-    exit 1
+	echo "Timeout (300s) exceeded while waiting for chaos to be recovered"
+	exit 1
 }

 check_primary_chaos() {
-    local chaos_type=$1
-    local ns=$2
-    local primary_before_failure=$3
-
-    local chaos_name
-    case ${chaos_type} in
-        "kill")
-            chaos_name="chaos-pod-kill-primary"
-            kill_pods "${ns}" "pod" "${primary_before_failure}" "" "${chaos_name}"
-            ;;
-        "full-cluster-crash")
-            chaos_name="chaos-kill-label-cluster-crash"
-            kill_pods "${ns}" "label" "app.kubernetes.io/instance" "gr-self-healing" "${chaos_name}"
-            ;;
-        "failure")
-            chaos_name="chaos-pod-failure-primary"
-            failure_pod "${ns}" "${primary_before_failure}" "${chaos_name}"
-            ;;
-        "network")
-            chaos_name="chaos-pod-network-loss-primary"
-            network_loss "${ns}" "${primary_before_failure}" "${chaos_name}"
-            ;;
-    esac
-
-    wait_until_chaos_applied ${chaos_type} ${chaos_name}
-    if [[ ${chaos_type} == "failure" || ${chaos_type} == "network" ]]; then
-        wait_until_chaos_recovered ${chaos_type} ${chaos_name}
-    fi
-
-    wait_cluster_consistency_gr "$(get_cluster_name)" 3 3
-
-    primary_after_failure=$(get_primary_from_group_replication)
-    uri=$(get_mysqlsh_uri_for_pod ${primary_after_failure})
-    wait_until_innodb_ok ${uri}
-
-    if [[ "${primary_before_failure}" == "${primary_after_failure}" ]]; then
-        echo "primary pod was not killed! something went wrong."
-        exit 1
-    fi
-
-    uri=$(get_mysqlsh_uri_for_pod $(get_primary_from_group_replication))
-    online_members=$(get_innodb_cluster_status ${uri} \
-        | jq .defaultReplicaSet.topology[].status \
-        | grep ONLINE \
-        | wc -l)
-
-    if [[ ${online_members} != 3 ]]; then
-        echo "expected 3 online members, got ${online_members}"
-        exit 1
-    fi
+	local chaos_type=$1
+	local ns=$2
+	local primary_before_failure=$3
+
+	local chaos_name
+	case ${chaos_type} in
+		"kill")
+			chaos_name="chaos-pod-kill-primary"
+			kill_pods "${ns}" "pod" "${primary_before_failure}" "" "${chaos_name}"
+			;;
+		"full-cluster-crash")
+			chaos_name="chaos-kill-label-cluster-crash"
+			kill_pods "${ns}" "label" "app.kubernetes.io/instance" "gr-self-healing" "${chaos_name}"
+			;;
+		"failure")
+			chaos_name="chaos-pod-failure-primary"
+			failure_pod "${ns}" "${primary_before_failure}" "${chaos_name}"
+			;;
+		"network")
+			chaos_name="chaos-pod-network-loss-primary"
+			network_loss "${ns}" "${primary_before_failure}" "${chaos_name}"
+			;;
+	esac
+
+	wait_until_chaos_applied ${chaos_type} ${chaos_name}
+	if [[ ${chaos_type} == "failure" || ${chaos_type} == "network" ]]; then
+		wait_until_chaos_recovered ${chaos_type} ${chaos_name}
+	fi
+
+	wait_cluster_consistency_gr "$(get_cluster_name)" 3 3
+
+	primary_after_failure=$(get_primary_from_group_replication)
+	uri=$(get_mysqlsh_uri_for_pod ${primary_after_failure})
+	wait_until_innodb_ok ${uri}
+
+	if [[ ${primary_before_failure} == "${primary_after_failure}" ]]; then
+		echo "primary pod was not killed! something went wrong."
+		exit 1
+	fi
+
+	uri=$(get_mysqlsh_uri_for_pod $(get_primary_from_group_replication))
+	online_members=$(get_innodb_cluster_status ${uri} \
+		| jq .defaultReplicaSet.topology[].status \
+		| grep ONLINE \
+		| wc -l)
+
+	if [[ ${online_members} != 3 ]]; then
+		echo "expected 3 online members, got ${online_members}"
+		exit 1
+	fi
 }

 renew_certificate() {
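
The wait helpers above poll the Chaos Mesh resource until its experiment records contain a succeeded event for the relevant operation; 30 retries with a 10-second sleep is where the 300s in the timeout messages comes from. A one-off equivalent of a single loop iteration, using the kill-primary chaos name defined in check_primary_chaos (non-empty output means the Apply phase succeeded):

    kubectl -n "${NAMESPACE}" get podchaos/chaos-pod-kill-primary -o yaml \
    	| yq '.status.experiment.containerRecords[].events[]
    	| select(.operation == "Apply" and .type == "Succeeded")'
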
@@ -1204,6 +1216,7 @@ check_scheduled_backup_labels() {
 		echo "Label percona.com/backup-ancestor is missing"
 		exit 1
 	fi
+}

 latest_operator_version_in_vs() {
 	local latest=$(curl -s https://check.percona.com/versions/v1/ps-operator | jq -r '.versions[].operator' | sort -V | tail -n1)
@@ -1235,12 +1248,18 @@ get_cr_with_latest_versions_in_vs() {
 	local image_haproxy=$(echo ${latest_versions} | jq -r '.versions[].matrix.haproxy[].imagePath')
 	local image_pmm_client=$(echo ${latest_versions} | jq -r '.versions[].matrix.pmm[].imagePath')

+	local git_tag="v${version}"
+
+	curl "https://raw.githubusercontent.com/percona/percona-server-mysql-operator/${git_tag}/deploy/cr.yaml" \
+		-o ${TEMP_DIR}/cr.yaml
+
 	get_cr "" \
 		${image_mysql} \
 		${image_backup} \
 		${image_orchestrator} \
 		${image_router} \
 		${image_toolkit} \
 		${image_haproxy} \
-		${image_pmm_client}
+		${image_pmm_client} \
+		${TEMP_DIR}/cr.yaml
 }
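
With the added download step, the base CR now comes from the release tag matching the operator version resolved from the version service rather than the local deploy directory, and is handed to get_cr as its new ninth argument. For a hypothetical version 0.8.0 the resolved call would be:

    # git_tag becomes "v0.8.0"; the downloaded manifest is then rendered by get_cr
    curl "https://raw.githubusercontent.com/percona/percona-server-mysql-operator/v0.8.0/deploy/cr.yaml" \
    	-o "${TEMP_DIR}/cr.yaml"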