 install_backup() {
-  NAMESPACE=central
-  VERSION=2.8.4
-  curl -O https://raw.githubusercontent.com/portworx/helm/master/stable/px-central-$VERSION.tgz
-  helm install px-central px-central-$VERSION.tgz --namespace $NAMESPACE --create-namespace --version $VERSION --set persistentStorage.enabled=true,persistentStorage.storageClassName="px-csi-db",pxbackup.enabled=true,oidc.centralOIDC.updateAdminProfile=false,installCRDs=true
-  until (kubectl get po -n $NAMESPACE -ljob-name=pxcentral-post-install-hook -o wide | awk '{print $1, $2, $3}' | grep "Completed"); do echo "Waiting for post install hook"; sleep 3; done
-  until (kubectl get po -n $NAMESPACE -lapp=px-backup -o wide | awk '{print $1, $2, $3}' | grep "Running" | grep "1/1"); do echo "Waiting for backup service"; sleep 3; done
-  # sometimes mongodb pods do not start. apply workaround if detected
-  echo "checking for statefulset pxc-backup-mongodb readiness"
-  while ! kubectl wait --for=jsonpath='{.status.readyReplicas}'=3 sts/pxc-backup-mongodb -n central --timeout 180s; do
-    echo "statefulset mongodb not ready"
-    POD=$(kubectl get pods -n central -l app.kubernetes.io/component=pxc-backup-mongodb -ojson | jq -r '.items[] | select(.status.containerStatuses[].ready==false) | .metadata.name' | head -n1)
-    echo "deleting data dir in failed pod $POD"
-    kubectl exec $POD -n central -- rm -rf /bitnami/mongodb/data/db
-    echo "waiting for $POD to restart"
-  done
-  # enable pxmonitor & grafana (needs a running px-backup-ui IP/Port)
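-  # 169.254.169.254 is the EC2 instance metadata service, used here to look up the node's public IP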
-  pubIP=$(curl http://169.254.169.254/latest/meta-data/public-ipv4)
-  backupPort=$(kubectl get svc px-backup-ui -n $NAMESPACE -o=jsonpath='{.spec.ports[?(@.port==80)].nodePort}')
-  kubectl delete job pxcentral-post-install-hook --namespace $NAMESPACE
-  helm upgrade px-central px-central-$VERSION.tgz --namespace $NAMESPACE --version $VERSION --reuse-values --set pxmonitor.enabled=true --set pxmonitor.pxCentralEndpoint=$pubIP:$backupPort
-  until (kubectl get po -n $NAMESPACE -ljob-name=pxcentral-post-install-hook -o wide | awk '{print $1, $2, $3}' | grep "Completed"); do echo "Waiting for post install hook"; sleep 3; done
-  BACKUP_POD_NAME=$(kubectl get pods -n $NAMESPACE -l app=px-backup -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
-  kubectl cp -n $NAMESPACE $BACKUP_POD_NAME:pxbackupctl/linux/pxbackupctl /usr/bin/pxbackupctl
-  chmod +x /usr/bin/pxbackupctl
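-  # pin the UI service to a fixed NodePort (30303) so the access URL is predictable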
-  kubectl patch svc px-backup-ui -n central -p '{"spec": { "type": "NodePort", "ports": [ { "nodePort": 30303, "port": 80, "protocol": "TCP", "targetPort": 8080 } ] } }'
-  BACKUP_POD_IP=$(kubectl get pods -n central -l app=px-backup -o jsonpath='{.items[*].status.podIP}' 2>/dev/null)
-  AWS_ACCESS_KEY=$(sed -n 's/aws_access_key_id[ =]*//p' /root/.aws/credentials 2>/dev/null)
-  AWS_SECRET_KEY=$(sed -n 's/aws_secret_access_key[ =]*//p' /root/.aws/credentials 2>/dev/null)
-  client_secret=$(kubectl get secret --namespace central pxc-backup-secret -o jsonpath={.data.OIDC_CLIENT_SECRET} | base64 --decode)
-  pxbackupctl login -s http://$pubIP:$backupPort -u admin -p admin
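-  # 10002 is the px-backup gRPC API port on the backup pod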
-  pxbackupctl create cloudcredential --aws-access-key $AWS_ACCESS_KEY --aws-secret-key $AWS_SECRET_KEY -e $BACKUP_POD_IP:10002 --orgID default -n s3 -p aws
-  sleep 5
-  cloud_credential_uid=$(pxbackupctl get cloudcredential -e $BACKUP_POD_IP:10002 --orgID default -o json | jq -cr '.[0].metadata.uid')
-  pxbackupctl create backuplocation --cloud-credential-name s3 --cloud-credential-Uid $cloud_credential_uid -n aws -p s3 --s3-endpoint https://s3.$aws_region.amazonaws.com --path $BACKUP_BUCKET --s3-region $aws_region -e $BACKUP_POD_IP:10002 --orgID default
-  pxbackupctl create schedulepolicy --interval-minutes 15 --interval-retain 12 --name example-schedule -e $BACKUP_POD_IP:10002 --orgID default
-  sleep 5
-  cat <<EOF >> /etc/motd
-+================================================+
-SAVE THE FOLLOWING DETAILS FOR FUTURE REFERENCES
-+================================================+
-PX-Central User Interface Access URL : http://$pubIP:$backupPort
-PX-Central admin user name: admin
-PX-Central admin user password: admin
-+================================================+
-EOF
+  px pxb create cloudcredential --name aws-credential --provider aws --aws-access-key $AWS_ACCESS_KEY --aws-secret-key $AWS_SECRET_KEY
+  px pxb create backuplocation --name s3 --provider s3 --path $BACKUP_BUCKET --cloud-credential-name aws-credential --s3-endpoint s3.amazonaws.com --s3-region $aws_region
+  px pxb create schedulepolicy --name 15min-schedule --interval-minutes 15 --interval-retain 12
+  PXB_URL=$(kubectl get svc px-backup-ui -n central -o=jsonpath='{.status.loadBalancer.ingress[0].hostname}')
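+  # share the access details with the paired odd-numbered cluster's MOTD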
+  scp /etc/motd master-$[$cluster-1]:/etc/motd
 }
 
-# Configure users on cluster 1
-if [ $cluster -eq 1 ]; then
-  mkdir /etc/skel/.kube /etc/skel/yaml
-  cp /assets/petclinic/petclinic.yml /etc/skel/yaml
-  cat <<EOF >>/etc/skel/.bashrc
-alias k=kubectl
-complete -F __start_kubectl k
-PS1='\e[0;33m[\u@px-training \W]\$ \e[m'
-alias pxctl='kubectl pxc pxctl'
-EOF
-  for i in $(seq 1 $clusters); do
-    useradd training$i
-    passwd --stdin training$i <<<portworx
-  done
-  echo -e 'kubectl exec $(kubectl get pod -n portworx -l name=portworx -o jsonpath="{.items[0].metadata.name}") -n portworx -c portworx -- curl -s https://ipinfo.io/ip\necho' >/usr/bin/getip
-  chmod +x /usr/bin/getip
-fi
-
-# Wait for clusters to be up and copy kubeconfigs to cluster 1
-while :; do
-  echo trying to copy kubeconfig
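-  # ssh returns the remote command's exit status; a distinctive code (22) distinguishes a successful copy from connection failures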
-  cat /root/.kube/config | ssh master-1 "su -l training$cluster -c 'cat >.kube/config' && exit 22"
-  [ $? -eq 22 ] && break
-  sleep 2
-done
-
-export cluster
-if [ $[2*$[$cluster/2]] -eq $cluster ]; then
-  # even cluster
-  while : ; do
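-    # pxctl prints a 128-character cluster token once Portworx is up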
-    token=$(kubectl exec -n portworx -it $(kubectl get pods -n portworx -lname=portworx --field-selector=status.phase=Running | tail -1 | cut -f 1 -d " ") -- /opt/pwx/bin/pxctl cluster token show 2>/dev/null | cut -f 3 -d " ")
-    echo $token | grep -Eq '\w{128}'
-    [ $? -eq 0 ] && break
-    sleep 5
-    echo waiting for portworx
-  done
-  UUID=$(kubectl get stc -n portworx -o jsonpath='{.items[].status.clusterUid}')
+install_dr() {
   AWS_ACCESS_KEY=$(sed -n 's/aws_access_key_id[ =]*//p' /root/.aws/credentials 2>/dev/null | head -1)
   AWS_SECRET_KEY=$(sed -n 's/aws_secret_access_key[ =]*//p' /root/.aws/credentials 2>/dev/null | head -1)
+
   echo "Creating bucket '$DR_BUCKET' in region 'us-east-1', if it does not exist"
   aws s3 mb s3://$DR_BUCKET --region us-east-1
   BUCKET_REGION=$(aws s3api get-bucket-location --bucket $DR_BUCKET --output text)
@@ -93,56 +18,37 @@ if [ $[2*$[$cluster/2]] -eq $cluster ]; then
     BUCKET_REGION="us-east-1"
   fi
   echo "Bucket region: $BUCKET_REGION"
+
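+  # async-dr mode requires a PX-DR license; fall back to plain migration otherwise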
+  /usr/bin/pxctl license list | grep PX-DR | grep -q yes && MODE=async-dr || MODE=migration
+  echo Mode is $MODE
+
   while : ; do
-    kubectl exec $(kubectl get pod -n portworx -lname=portworx | tail -1 | cut -f 1 -d " ") -n portworx -c portworx -- /opt/pwx/bin/pxctl credentials delete clusterPair_$UUID
-    kubectl exec $(kubectl get pod -n portworx -lname=portworx | tail -1 | cut -f 1 -d " ") -n portworx -c portworx -- /opt/pwx/bin/pxctl credentials create --provider s3 --s3-access-key $AWS_ACCESS_KEY --s3-secret-key $AWS_SECRET_KEY --s3-region $BUCKET_REGION --s3-endpoint s3.$BUCKET_REGION.amazonaws.com --s3-storage-class STANDARD --bucket $DR_BUCKET clusterPair_$UUID
-    [ $? -eq 0 ] && break
-    sleep 1
-  done
-  while : ; do
-    ssh master-$[$cluster-1] kubectl exec '$(kubectl get pod -n portworx -lname=portworx | tail -1 | cut -f 1 -d " ") -n portworx -c portworx -- /opt/pwx/bin/pxctl credentials create --provider s3 --s3-access-key '$AWS_ACCESS_KEY' --s3-secret-key '$AWS_SECRET_KEY' --s3-region '$BUCKET_REGION' --s3-endpoint s3.'$BUCKET_REGION'.amazonaws.com --s3-storage-class STANDARD --bucket '$DR_BUCKET' clusterPair_'$UUID
-    [ $? -eq 0 ] && break
-    sleep 1
-  done
-  host=node-$cluster-1
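-  # fill the generated ClusterPair template with the destination node address and cluster token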
-  storkctl generate clusterpair -n kube-system remotecluster-$cluster | sed "/insert_storage_options_here/c\ ip: $host\n token: $token\n mode: DisasterRecovery" >/tmp/cp.yml
-  while : ; do
-    scp /tmp/cp.yml master-1:/home/training$[$cluster-1]/yaml/cp.yml
-    ssh -oConnectTimeout=1 -oStrictHostKeyChecking=no master-1 "chown training$cluster.training$cluster /home/training$[$cluster-1]/yaml/cp.yml ; kubectl apply --kubeconfig /home/training$[$cluster-1]/.kube/config -f /home/training$[$cluster-1]/yaml/cp.yml"
-    [ $? -eq 0 ] && break
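+    # a single storkctl call now creates the S3 credentials and the ClusterPair from the two kubeconfigs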
+    scp master-$[$cluster+1]:/root/.kube/config /tmp/dest_kubeconfig
+    storkctl create clusterpair remotecluster --namespace kube-system $LOCAL_EP $REMOTE_EP --dest-kube-file /tmp/dest_kubeconfig --src-kube-file /root/.kube/config --provider s3 --s3-endpoint s3.amazonaws.com --s3-access-key $AWS_ACCESS_KEY --s3-secret-key $AWS_SECRET_KEY --s3-region $BUCKET_REGION --bucket $DR_BUCKET --mode $MODE && break
     sleep 5
   done
+
+  kubectl apply -f /assets/dude/async-dr.yaml
+}
+
+# Set up users on cluster 1
+if [ $cluster -eq 1 ]; then
+  mkdir /etc/skel/.kube /etc/skel/yaml
+  cp /assets/petclinic/petclinic.yml /assets/dude/async-dr.yaml /etc/skel/yaml
+  cp /assets/dude/bashrc /etc/skel/.bashrc
+  for i in $(seq 1 $clusters); do
+    useradd training$i
+    passwd --stdin training$i <<<portworx
+    scp master-$i:/root/.kube/config /tmp/kubeconfig.$i
+    install -o training$i -g training$i /tmp/kubeconfig.$i /home/training$i/.kube/config
+  done
+  cp /assets/dude/ui.sh /etc/profile.d/ui.sh
+fi
+
+export cluster
+if [ $[2*$[$cluster/2]] -eq $cluster ]; then
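+  # 2*(n/2) equals n only for even n, so even-numbered clusters host PX-Backup and odd ones run DR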
   install_backup
 else
-  # odd cluster
-  cat <<EOF >/tmp/async-dr.yml
-apiVersion: stork.libopenstorage.org/v1alpha1
-kind: SchedulePolicy
-metadata:
-  name: drpolicy
-policy:
-  interval:
-    intervalMinutes: 2
----
-apiVersion: stork.libopenstorage.org/v1alpha1
-kind: MigrationSchedule
-metadata:
-  name: appmigrationschedule
-  namespace: kube-system
-spec:
-  template:
-    spec:
-      clusterPair: remotecluster-$[$cluster+1]
-      includeResources: true
-      startApplications: false
-      namespaces:
-      - petclinic
-      schedulePolicyName: drpolicy
-EOF
-  while : ; do
-    scp /tmp/async-dr.yml master-1:/home/training$cluster/yaml/async-dr.yml
-    ssh -oConnectTimeout=1 -oStrictHostKeyChecking=no master-1 "chown training$cluster.training$cluster /home/training$cluster/yaml/async-dr.yml ; kubectl apply --kubeconfig /home/training$cluster/.kube/config -f /home/training$cluster/yaml/async-dr.yml"
-    [ $? -eq 0 ] && break
-    sleep 5
-  done
+  kubectl delete ns central &
+  install_dr
 fi