Skip to content

Commit 97b510d

Browse files
nmarukovichhorsjvpasinatto
authored
K8SPXC-1683 add pmm and 8.4 to smart-update test and K8SPXC-1712 add 8.4 to pr check. (#2289)
* K8SPXC-1683 add pmm and 8.4 to smart-update test * separate tests * add to test * delete pitr * fix the test * fix * for debug * debug * debug * debug * fix smart-update * add retries * add debug info * add debug * fix test * update test * fix images * fix test * fix test * fix pmm version * delete unnecessary set -x --------- Co-authored-by: Viacheslav Sarzhan <slava.sarzhan@percona.com> Co-authored-by: Julio Pasinatto <julio.pasinatto@percona.com>
1 parent 6ee0ce5 commit 97b510d

24 files changed

+1098
-395
lines changed

e2e-tests/functions

Lines changed: 131 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -23,8 +23,8 @@ export IMAGE_BACKUP=${IMAGE_BACKUP:-"perconalab/percona-xtradb-cluster-operator:
2323
export IMAGE_LOGCOLLECTOR=${IMAGE_LOGCOLLECTOR:-"perconalab/fluentbit:main-logcollector"}
2424
export IMAGE_PMM_CLIENT=${IMAGE_PMM_CLIENT:-"perconalab/pmm-client:dev-latest"}
2525
export IMAGE_PMM_SERVER=${IMAGE_PMM_SERVER:-"perconalab/pmm-server:dev-latest"}
26-
export IMAGE_PMM3_CLIENT=${IMAGE_PMM3_CLIENT:-"perconalab/pmm-client:3.1.0"}
27-
export IMAGE_PMM3_SERVER=${IMAGE_PMM3_SERVER:-"perconalab/pmm-server:3.1.0"}
26+
export IMAGE_PMM3_CLIENT=${IMAGE_PMM3_CLIENT:-"perconalab/pmm-client:3-dev-latest"}
27+
export IMAGE_PMM3_SERVER=${IMAGE_PMM3_SERVER:-"perconalab/pmm-server:3-dev-latest"}
2828

2929
if oc get projects 2>/dev/null; then
3030
OPENSHIFT=$(oc version -o json | jq -r '.openshiftVersion' | grep -oE '^[0-9]+\.[0-9]+')
@@ -894,6 +894,39 @@ get_metric_values() {
894894

895895
}
896896

897+
# get_metric_values_pmm3 <metric> <instance> <token>
# Poll the PMM3 server's Grafana datasource proxy until numeric datapoints
# for <metric> on node_name=~<instance> appear (last-minute window, 60s step).
# Unlike the PMM2 variant, authentication uses a service-account Bearer token.
# Exits the whole test (status 1) if no sample shows up within ~30 attempts.
get_metric_values_pmm3() {
	local metric=$1
	local instance=$2
	local token=$3
	local start=$($date -u "+%s" -d "-1 minute")
	local end=$($date -u "+%s")
	local endpoint=$(get_service_endpoint monitoring-service)

	if [[ -z $metric ]]; then
		echo "Error: metric is required"
		exit 1
	fi

	if [[ -z $token ]]; then
		echo "Error: token is required"
		exit 1
	fi

	local max_attempts=30
	local attempt=0
	# Re-query with a fresh one-minute window until jq extracts at least one
	# numeric value from the Prometheus-style range response.
	while [[ -z $(curl -s -k -H "Authorization: Bearer ${token}" "https://$endpoint/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28$metric%7Bnode_name%3D%7E%22$instance%22%7d%20or%20$metric%7Bnode_name%3D%7E%22$instance%22%7D%29&start=$start&end=$end&step=60" \
		| jq '.data.result[0].values[][1]' \
		| grep '^"[0-9]') ]]; do
		sleep 2
		start=$($date -u "+%s" -d "-1 minute")
		end=$($date -u "+%s")
		attempt=$((attempt + 1))
		if [[ $attempt -ge $max_attempts ]]; then
			exit 1
		fi
	done
}
929+
897930
get_qan20_values() {
898931
local instance=$1
899932
local user_pass=$2
@@ -2082,6 +2115,39 @@ deploy_pmm_server() {
20822115
wait_for_pmm_service
20832116
}
20842117

2118+
deploy_pmm3_server() {
2119+
helm uninstall -n "${NAMESPACE}" monitoring || :
2120+
helm repo remove percona || :
2121+
kubectl delete clusterrole monitoring --ignore-not-found
2122+
kubectl delete clusterrolebinding monitoring --ignore-not-found
2123+
helm repo add percona https://percona.github.io/percona-helm-charts/
2124+
helm repo update
2125+
2126+
if [ ! -z "$OPENSHIFT" ]; then
2127+
oc create sa pmm-server
2128+
oc adm policy add-scc-to-user privileged -z pmm-server
2129+
if [[ $OPERATOR_NS ]]; then
2130+
timeout 30 oc delete clusterrolebinding $(kubectl get clusterrolebinding | grep 'pmm-pxc-operator-' | awk '{print $1}') || :
2131+
oc create clusterrolebinding pmm-pxc-operator-cluster-wide --clusterrole=percona-xtradb-cluster-operator --serviceaccount=$namespace:pmm-server
2132+
oc patch clusterrole/percona-xtradb-cluster-operator --type json -p='[{"op":"add","path": "/rules/-","value":{"apiGroups":["security.openshift.io"],"resources":["securitycontextconstraints"],"verbs":["use"],"resourceNames":["privileged"]}}]' -n $OPERATOR_NS
2133+
else
2134+
oc create rolebinding pmm-pxc-operator-namespace-only --role percona-xtradb-cluster-operator --serviceaccount=$namespace:pmm-server
2135+
oc patch role/percona-xtradb-cluster-operator --type json -p='[{"op":"add","path": "/rules/-","value":{"apiGroups":["security.openshift.io"],"resources":["securitycontextconstraints"],"verbs":["use"],"resourceNames":["privileged"]}}]'
2136+
fi
2137+
local additional_params="--set platform=openshift --set supresshttp2=false --set serviceAccount.create=false --set serviceAccount.name=pmm-server"
2138+
fi
2139+
2140+
retry 10 60 helm install monitoring percona/pmm -n "${NAMESPACE}" \
2141+
--set fullnameOverride=monitoring \
2142+
--set image.tag=${IMAGE_PMM3_SERVER#*:} \
2143+
--set image.repository=${IMAGE_PMM3_SERVER%:*} \
2144+
--set service.type=LoadBalancer \
2145+
$additional_params \
2146+
--force
2147+
2148+
wait_for_pmm_service
2149+
}
2150+
20852151
wait_for_pmm_service() {
20862152
timeout=420
20872153
start=$(date +%s)
@@ -2097,6 +2163,69 @@ wait_for_pmm_service() {
20972163
kubectl_bin wait sts/monitoring --for=jsonpath='{.status.readyReplicas}'=1 --timeout=${timeout}s
20982164
}
20992165

2166+
# get_pmm_server_token [key_name]
# Create a Grafana service account (role Admin) named key_name (default
# "operator") on the PMM3 server and print a fresh API token for it.
# Reads the admin password from the pmm-secret k8s secret.
# Returns 1 (with a message on stderr) on any failure; token goes to stdout.
get_pmm_server_token() {
	local key_name=$1

	if [[ -z $key_name ]]; then
		key_name="operator"
	fi

	local ADMIN_PASSWORD
	ADMIN_PASSWORD=$(kubectl get secret pmm-secret -o jsonpath="{.data.PMM_ADMIN_PASSWORD}" | base64 --decode)

	if [[ -z $ADMIN_PASSWORD ]]; then
		echo "Error: ADMIN_PASSWORD is empty or not found!" >&2
		return 1
	fi

	local create_response create_status_code create_json_response
	local retry=0
	# Initialize so the first loop test is defined even under nounset.
	create_status_code=""
	until [[ $create_status_code == 201 ]]; do
		# "|| :" keeps errexit from killing the script on a transient curl
		# failure — that is exactly what this retry loop is for.
		create_response=$(curl --insecure -s -X POST -H 'Content-Type: application/json' -H 'Accept: application/json' \
			-d "{\"name\":\"${key_name}\", \"role\":\"Admin\", \"isDisabled\":false}" \
			--user "admin:${ADMIN_PASSWORD}" \
			"https://$(get_service_ip monitoring-service)/graph/api/serviceaccounts" \
			-w "\n%{http_code}") || :

		# Last line is the HTTP status (-w above); everything before it is
		# the JSON body.
		create_status_code=$(echo "$create_response" | tail -n1)
		create_json_response=$(echo "$create_response" | sed '$ d')

		# Only back off and count the attempt when the call did not succeed.
		if [[ $create_status_code != 201 ]]; then
			sleep 5
			retry=$((retry + 1))
			if [ "$retry" -ge 24 ]; then
				echo "Error: Failed to create PMM service account. HTTP Status: $create_status_code" >&2
				echo "Response: $create_json_response" >&2
				return 1
			fi
		fi
	done

	local service_account_id
	service_account_id=$(echo "$create_json_response" | jq -r '.id')

	if [[ -z $service_account_id || $service_account_id == "null" ]]; then
		echo "Error: Failed to extract service account ID!" >&2
		return 1
	fi

	local token_response token_status_code token_json_response
	token_response=$(curl --insecure -s -X POST -H 'Content-Type: application/json' \
		-d "{\"name\":\"${key_name}\"}" \
		--user "admin:${ADMIN_PASSWORD}" \
		"https://$(get_service_ip monitoring-service)/graph/api/serviceaccounts/${service_account_id}/tokens" \
		-w "\n%{http_code}") || :

	token_status_code=$(echo "$token_response" | tail -n1)
	token_json_response=$(echo "$token_response" | sed '$ d')

	if [[ $token_status_code -ne 200 ]]; then
		echo "Error: Failed to create token. HTTP Status: $token_status_code" >&2
		echo "Response: $token_json_response" >&2
		return 1
	fi

	# The token itself lives in the "key" field of the response.
	echo "$token_json_response" | jq -r '.key'
}
2228+
21002229
run_recovery_check_pitr() {
21012230
local cluster=$1
21022231
local restore=$2

e2e-tests/monitoring-pmm3/run

Lines changed: 4 additions & 133 deletions
Original file line numberDiff line numberDiff line change
@@ -7,39 +7,6 @@ test_dir=$(realpath $(dirname $0))
77

88
set_debug
99

10-
deploy_pmm3_server() {
11-
helm uninstall -n "${NAMESPACE}" monitoring || :
12-
helm repo remove percona || :
13-
kubectl delete clusterrole monitoring --ignore-not-found
14-
kubectl delete clusterrolebinding monitoring --ignore-not-found
15-
helm repo add percona https://percona.github.io/percona-helm-charts/
16-
helm repo update
17-
18-
if [ ! -z "$OPENSHIFT" ]; then
19-
oc create sa pmm-server
20-
oc adm policy add-scc-to-user privileged -z pmm-server
21-
if [[ $OPERATOR_NS ]]; then
22-
timeout 30 oc delete clusterrolebinding $(kubectl get clusterrolebinding | grep 'pmm-pxc-operator-' | awk '{print $1}') || :
23-
oc create clusterrolebinding pmm-pxc-operator-cluster-wide --clusterrole=percona-xtradb-cluster-operator --serviceaccount=$namespace:pmm-server
24-
oc patch clusterrole/percona-xtradb-cluster-operator --type json -p='[{"op":"add","path": "/rules/-","value":{"apiGroups":["security.openshift.io"],"resources":["securitycontextconstraints"],"verbs":["use"],"resourceNames":["privileged"]}}]' -n $OPERATOR_NS
25-
else
26-
oc create rolebinding pmm-pxc-operator-namespace-only --role percona-xtradb-cluster-operator --serviceaccount=$namespace:pmm-server
27-
oc patch role/percona-xtradb-cluster-operator --type json -p='[{"op":"add","path": "/rules/-","value":{"apiGroups":["security.openshift.io"],"resources":["securitycontextconstraints"],"verbs":["use"],"resourceNames":["privileged"]}}]'
28-
fi
29-
local additional_params="--set platform=openshift --set supresshttp2=false --set serviceAccount.create=false --set serviceAccount.name=pmm-server"
30-
fi
31-
32-
retry 10 60 helm install monitoring percona/pmm -n "${NAMESPACE}" \
33-
--set fullnameOverride=monitoring \
34-
--set image.tag=${IMAGE_PMM3_SERVER#*:} \
35-
--set image.repository=${IMAGE_PMM3_SERVER%:*} \
36-
--set service.type=LoadBalancer \
37-
$additional_params \
38-
--force
39-
40-
wait_for_pmm_service
41-
}
42-
4310
spinup_pxc() {
4411
local cluster=$1
4512
local config=$2
@@ -89,69 +56,6 @@ spinup_pxc() {
8956
fi
9057
}
9158

92-
get_pmm_server_token() {
93-
local key_name=$1
94-
95-
if [[ -z $key_name ]]; then
96-
key_name="operator"
97-
fi
98-
99-
local ADMIN_PASSWORD
100-
ADMIN_PASSWORD=$(kubectl get secret pmm-secret -o jsonpath="{.data.PMM_ADMIN_PASSWORD}" | base64 --decode)
101-
102-
if [[ -z $ADMIN_PASSWORD ]]; then
103-
echo "Error: ADMIN_PASSWORD is empty or not found!" >&2
104-
return 1
105-
fi
106-
107-
local create_response create_status_code create_json_response
108-
local retry=0
109-
until [[ $create_status_code == 201 ]]; do
110-
create_response=$(curl --insecure -s -X POST -H 'Content-Type: application/json' -H 'Accept: application/json' \
111-
-d "{\"name\":\"${key_name}\", \"role\":\"Admin\", \"isDisabled\":false}" \
112-
--user "admin:${ADMIN_PASSWORD}" \
113-
"https://$(get_service_ip monitoring-service)/graph/api/serviceaccounts" \
114-
-w "\n%{http_code}")
115-
116-
create_status_code=$(echo "$create_response" | tail -n1)
117-
create_json_response=$(echo "$create_response" | sed '$ d')
118-
119-
sleep 5
120-
let retry+=1
121-
if [ "$retry" -ge 24 ]; then
122-
echo "Error: Failed to create PMM service account. HTTP Status: $create_status_code" >&2
123-
echo "Response: $create_json_response" >&2
124-
return 1
125-
fi
126-
done
127-
128-
local service_account_id
129-
service_account_id=$(echo "$create_json_response" | jq -r '.id')
130-
131-
if [[ -z $service_account_id || $service_account_id == "null" ]]; then
132-
echo "Error: Failed to extract service account ID!" >&2
133-
return 1
134-
fi
135-
136-
local token_response token_status_code token_json_response
137-
token_response=$(curl --insecure -s -X POST -H 'Content-Type: application/json' \
138-
-d "{\"name\":\"${key_name}\"}" \
139-
--user "admin:${ADMIN_PASSWORD}" \
140-
"https://$(get_service_ip monitoring-service)/graph/api/serviceaccounts/${service_account_id}/tokens" \
141-
-w "\n%{http_code}")
142-
143-
token_status_code=$(echo "$token_response" | tail -n1)
144-
token_json_response=$(echo "$token_response" | sed '$ d')
145-
146-
if [[ $token_status_code -ne 200 ]]; then
147-
echo "Error: Failed to create token. HTTP Status: $token_status_code" >&2
148-
echo "Response: $token_json_response" >&2
149-
return 1
150-
fi
151-
152-
echo "$token_json_response" | jq -r '.key'
153-
}
154-
15559
verify_custom_cluster_name() {
15660
local expected_cluster=$1
15761
local token=$2
@@ -263,39 +167,6 @@ delete_pmm_server_token() {
263167
fi
264168
}
265169

266-
get_metric_values() {
267-
local metric=$1
268-
local instance=$2
269-
local token=$3
270-
local start=$($date -u "+%s" -d "-1 minute")
271-
local end=$($date -u "+%s")
272-
local endpoint=$(get_service_endpoint monitoring-service)
273-
274-
if [ -z "$metric" ]; then
275-
echo "Error: metric is required"
276-
exit 1
277-
fi
278-
279-
if [ -z "$token" ]; then
280-
echo "Error: token is required"
281-
exit 1
282-
fi
283-
284-
local wait_count=30
285-
local retry=0
286-
until [[ $(curl -s -k -H "Authorization: Bearer ${token}" "https://$endpoint/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28$metric%7Bnode_name%3D%7E%22$instance%22%7d%20or%20$metric%7Bnode_name%3D%7E%22$instance%22%7D%29&start=$start&end=$end&step=60" \
287-
| jq '.data.result[0].values[][1]' \
288-
| grep '^"[0-9]') ]]; do
289-
sleep 2
290-
local start=$($date -u "+%s" -d "-1 minute")
291-
local end=$($date -u "+%s")
292-
let retry+=1
293-
if [[ $retry -ge $wait_count ]]; then
294-
exit 1
295-
fi
296-
done
297-
}
298-
299170
get_qan20_values() {
300171
local instance=$1
301172
local token=$2
@@ -404,12 +275,12 @@ compare_kubectl statefulset/$cluster-haproxy
404275

405276
desc 'check mysql metrics'
406277
sleep 60
407-
get_metric_values node_boot_time_seconds pxc-prefix-$namespace-$cluster-pxc-0 $NEW_TOKEN
408-
get_metric_values mysql_global_status_uptime pxc-prefix-$namespace-$cluster-pxc-0 $NEW_TOKEN
278+
get_metric_values_pmm3 node_boot_time_seconds pxc-prefix-$namespace-$cluster-pxc-0 $NEW_TOKEN
279+
get_metric_values_pmm3 mysql_global_status_uptime pxc-prefix-$namespace-$cluster-pxc-0 $NEW_TOKEN
409280

410281
desc 'check haproxy metrics'
411-
get_metric_values haproxy_backend_status pxc-prefix-$namespace-$cluster-haproxy-0 $NEW_TOKEN
412-
get_metric_values haproxy_backend_active_servers pxc-prefix-$namespace-$cluster-haproxy-0 $NEW_TOKEN
282+
get_metric_values_pmm3 haproxy_backend_status pxc-prefix-$namespace-$cluster-haproxy-0 $NEW_TOKEN
283+
get_metric_values_pmm3 haproxy_backend_active_servers pxc-prefix-$namespace-$cluster-haproxy-0 $NEW_TOKEN
413284

414285
desc 'check QAN data'
415286
get_qan20_values $cluster-pxc-0 $NEW_TOKEN

e2e-tests/run-pr.csv

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,9 +1,11 @@
11
auto-tuning,8.0
22
allocator,8.0
3+
allocator,8.4
34
backup-storage-tls,8.0
45
cross-site,8.0
56
custom-users,8.0
67
demand-backup-cloud,8.0
8+
demand-backup-cloud,8.4
79
demand-backup-cloud-pxb,8.0
810
demand-backup-encrypted-with-tls,5.7
911
demand-backup-encrypted-with-tls,8.0
@@ -13,16 +15,21 @@ demand-backup-encrypted-with-tls-pxb,8.0
1315
demand-backup-encrypted-with-tls-pxb,8.4
1416
demand-backup,8.0
1517
demand-backup-flow-control,8.0
18+
demand-backup-flow-control,8.4
1619
demand-backup-parallel,8.0
20+
demand-backup-parallel,8.4
1721
demand-backup-without-passwords,8.0
22+
demand-backup-without-passwords,8.4
1823
extra-pvc,8.0
1924
haproxy,5.7
2025
haproxy,8.0
26+
haproxy,8.4
2127
init-deploy,5.7
2228
init-deploy,8.0
2329
limits,8.0
2430
monitoring-2-0,8.0
2531
monitoring-pmm3,8.0
32+
monitoring-pmm3,8.4
2633
one-pod,5.7
2734
one-pod,8.0
2835
pitr,8.0
@@ -46,14 +53,19 @@ scaling-proxysql,8.0
4653
scaling,8.0
4754
scheduled-backup,5.7
4855
scheduled-backup,8.0
56+
scheduled-backup,8.4
4957
security-context,8.0
5058
smart-update1,8.0
59+
smart-update1,8.4
5160
smart-update2,8.0
61+
smart-update2,8.4
62+
smart-update3,8.0
5263
storage,8.0
5364
tls-issue-cert-manager-ref,8.0
5465
tls-issue-cert-manager,8.0
5566
tls-issue-self,8.0
5667
upgrade-consistency,8.0
68+
upgrade-consistency,8.4
5769
upgrade-haproxy,5.7
5870
upgrade-haproxy,8.0
5971
upgrade-proxysql,5.7

e2e-tests/run-release.csv

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -40,6 +40,7 @@ self-healing-advanced-chaos
4040
self-healing-chaos
4141
smart-update1
4242
smart-update2
43+
smart-update3
4344
storage
4445
tls-issue-cert-manager
4546
tls-issue-cert-manager-ref

0 commit comments

Comments
 (0)