#!/usr/bin/env bash

set -eEx
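
# CEPH_DEV_FOLDER defaults to the current working directory; callers can
# override it from the environment. The ':' builtin evaluates the
# ${VAR:=default} expansion without running anything.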
: ${CEPH_DEV_FOLDER:=${PWD}}
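# Run all kubectl commands through the kubectl bundled with minikube, which is
# already pointed at the minikube cluster.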
KUBECTL="minikube kubectl --"

# Build a local Ceph image that contains the latest code plus the changes
# from the PR. The Docker daemon running inside minikube uses this image
# to start the different Ceph pods.
LOCAL_CEPH_IMG="local/ceph"

on_error() {
    echo "ERROR: exit code $1 at line $2; deleting the minikube cluster"
    minikube delete
}

configure_libvirt(){
    sudo usermod -aG libvirt "$(id -un)"
    sudo su -l "$USER" # avoid having to log out and back in for the group addition to take effect
    sudo systemctl enable --now libvirtd
    sudo systemctl restart libvirtd
    sleep 10 # give the libvirtd service some time to finish restarting
}

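# Optional sanity check (assumes the libvirt CLI tools are installed):
#   sudo virsh list --all   # should succeed once libvirtd is up
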
setup_minikube_env() {

    # If minikube is already running, remove it so we start from a clean state
    if minikube status > /dev/null 2>&1; then
        echo "Minikube is running"
        minikube stop
        minikube delete
    else
        echo "Minikube is not running"
    fi

    rm -rf ~/.minikube
    minikube start --memory="4096" --cpus="2" --disk-size=10g --extra-disks=1 --driver kvm2
    # point the Docker CLI in this shell at the Docker daemon running inside minikube
    eval "$(minikube docker-env -p minikube)"
}

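# From here on, images built with 'docker build' land directly in the minikube
# node's image cache. Quick sanity check (the daemon's name is typically
# "minikube"):
#   docker info --format '{{.Name}}'
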
build_ceph_image() {
    wget -q -O cluster-test.yaml https://raw.githubusercontent.com/rook/rook/master/deploy/examples/cluster-test.yaml
    CURR_CEPH_IMG=$(grep -E '^\s*image:\s+' cluster-test.yaml | sed 's/.*image: *\([^ ]*\)/\1/')

    cd ${CEPH_DEV_FOLDER}/src/pybind/mgr/rook/ci
    mkdir -p tmp_build/rook
    mkdir -p tmp_build/orchestrator
    cp ./../../orchestrator/*.py tmp_build/orchestrator
    cp ../*.py tmp_build/rook

    # tag the local build with the image name used in cluster-test.yaml so the
    # Docker daemon inside minikube uses this image instead of pulling it
    # from the registry
    docker build --tag ${LOCAL_CEPH_IMG} .
    docker tag ${LOCAL_CEPH_IMG} ${CURR_CEPH_IMG}

    # cleanup
    rm -rf tmp_build
    cd ${CEPH_DEV_FOLDER}
}

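# Because the local build now carries the exact image name referenced by
# cluster-test.yaml, the pods created below run our image rather than a
# registry pull (as long as imagePullPolicy does not force a pull).
# Manifests used below: crds.yaml, common.yaml and operator.yaml install the
# Rook CRDs, the common namespaces/RBAC and the Rook operator;
# cluster-test.yaml creates a minimal test CephCluster; the last two expose
# the dashboard via a NodePort service and start the toolbox pod.
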
create_rook_cluster() {
    wget -q -O cluster-test.yaml https://raw.githubusercontent.com/rook/rook/master/deploy/examples/cluster-test.yaml
    $KUBECTL create -f https://raw.githubusercontent.com/rook/rook/master/deploy/examples/crds.yaml
    $KUBECTL create -f https://raw.githubusercontent.com/rook/rook/master/deploy/examples/common.yaml
    $KUBECTL create -f https://raw.githubusercontent.com/rook/rook/master/deploy/examples/operator.yaml
    $KUBECTL create -f cluster-test.yaml
    $KUBECTL create -f https://raw.githubusercontent.com/rook/rook/master/deploy/examples/dashboard-external-http.yaml
    $KUBECTL create -f https://raw.githubusercontent.com/rook/rook/master/deploy/examples/toolbox.yaml
}

wait_for_rook_operator() {
    local max_attempts=10
    local sleep_interval=20
    local attempts=0
    $KUBECTL rollout status deployment rook-ceph-operator -n rook-ceph --timeout=180s
    PHASE=$($KUBECTL get cephclusters.ceph.rook.io -n rook-ceph -o jsonpath='{.items[?(@.kind == "CephCluster")].status.phase}')
    echo "PHASE: $PHASE"
    while ! $KUBECTL get cephclusters.ceph.rook.io -n rook-ceph -o jsonpath='{.items[?(@.kind == "CephCluster")].status.phase}' | grep -q "Ready"; do
        echo "Waiting for cluster to be ready..."
        sleep $sleep_interval
        attempts=$((attempts+1))
        if [ $attempts -ge $max_attempts ]; then
            echo "Maximum number of attempts ($max_attempts) reached. Exiting..."
            return 1
        fi
    done
}

wait_for_ceph_cluster() {
    local max_attempts=10
    local sleep_interval=20
    local attempts=0
    $KUBECTL rollout status deployment rook-ceph-tools -n rook-ceph --timeout=30s
    while ! $KUBECTL get cephclusters.ceph.rook.io -n rook-ceph -o jsonpath='{.items[?(@.kind == "CephCluster")].status.ceph.health}' | grep -q "HEALTH_OK"; do
        echo "Waiting for the Ceph cluster to report HEALTH_OK..."
        sleep $sleep_interval
        attempts=$((attempts+1))
        if [ $attempts -ge $max_attempts ]; then
            echo "Maximum number of attempts ($max_attempts) reached. Exiting..."
            return 1
        fi
    done
    echo "Ceph cluster installed and running"
}

show_info() {
    DASHBOARD_PASSWORD=$($KUBECTL -n rook-ceph get secret rook-ceph-dashboard-password -o jsonpath="{['data']['password']}" | base64 --decode && echo)
    IP_ADDR=$($KUBECTL get po --selector="app=rook-ceph-mgr" -n rook-ceph --output jsonpath='{.items[*].status.hostIP}')
    PORT="$($KUBECTL -n rook-ceph -o=jsonpath='{.spec.ports[?(@.name == "dashboard")].nodePort}' get services rook-ceph-mgr-dashboard-external-http)"
    BASE_URL="http://$IP_ADDR:$PORT"
    echo "==========================="
    echo "Ceph Dashboard:"
    echo "   URL: $BASE_URL"
    echo "   PASSWORD: $DASHBOARD_PASSWORD"
    echo "==========================="
}

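# Note: in Rook deployments the dashboard's default account is "admin"; the
# password printed above is read from the rook-ceph-dashboard-password secret.
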
####################################################################
####################################################################

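# 'set -E' above makes the ERR trap fire inside functions as well; the trap
# hands the failing command's exit status and line number to on_error.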
trap 'on_error $? $LINENO' ERR

configure_libvirt
setup_minikube_env
build_ceph_image
create_rook_cluster
wait_for_rook_operator
wait_for_ceph_cluster
show_info

####################################################################
####################################################################