Skip to content

Commit 3ef0dfa

Browse files
committed
mgr/rook: adding e2e version based on minikube
Signed-off-by: Redouane Kachach <[email protected]>
1 parent 1a67e0c commit 3ef0dfa

File tree

6 files changed

+209
-0
lines changed

6 files changed

+209
-0
lines changed

src/pybind/mgr/rook/ci/Dockerfile

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
# Base image: the latest ceph daemon-base built from the main branch.
FROM quay.io/ceph/daemon-base:latest-main
# Overlay the orchestrator and rook mgr modules with the sources from this
# checkout (staged under ./tmp_build by the bootstrap script) so the image
# runs the code under test instead of the modules bundled in the base image.
COPY ./tmp_build/orchestrator /usr/share/ceph/mgr/orchestrator
COPY ./tmp_build/rook /usr/share/ceph/mgr/rook
Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
#!/usr/bin/env bash
#
# Entry point for the rook mgr-module e2e tests: bootstrap a Rook/Ceph
# cluster on minikube, then run the behave suite against it.

set -ex

# Execute tests
: ${CEPH_DEV_FOLDER:=${PWD}}
# Quote expansions so a checkout path containing spaces does not break
# the script (word splitting / globbing on unquoted ${CEPH_DEV_FOLDER}).
"${CEPH_DEV_FOLDER}/src/pybind/mgr/rook/ci/scripts/bootstrap-rook-cluster.sh"
cd "${CEPH_DEV_FOLDER}/src/pybind/mgr/rook/ci/tests"
behave
Lines changed: 135 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,135 @@
1+
#!/usr/bin/env bash
2+
3+
set -eEx
4+
5+
: ${CEPH_DEV_FOLDER:=${PWD}}
6+
KUBECTL="minikube kubectl --"
7+
8+
# We build a local ceph image that contains the latest code
9+
# plus changes from the PR. This image will be used by the docker
10+
# running inside the minikube to start the different ceph pods
11+
LOCAL_CEPH_IMG="local/ceph"
12+
13+
# ERR trap handler.  The trap below passes $? and $LINENO, but the original
# handler ignored them; report both so CI logs show where the run failed,
# then tear down the minikube VM so nothing is left behind.
#   $1 - exit status of the failing command
#   $2 - line number the failure occurred at
on_error() {
    echo "on error: exit status $1 at line $2"
    minikube delete
}
17+
18+
# Give the current user access to libvirt (needed by the kvm2 minikube
# driver) and make sure the libvirtd service is up and freshly restarted.
configure_libvirt(){
    sudo usermod -aG libvirt $(id -un)
    # NOTE(review): `sudo su -l $USER` starts a *new* login shell rather than
    # refreshing the group set of THIS shell, so the libvirt group may not be
    # effective for the rest of this script -- confirm this works in CI
    # (alternatives: `sg libvirt -c ...` or re-exec'ing the script).
    sudo su -l $USER # Avoid having to log out and log in for group addition to take effect.
    sudo systemctl enable --now libvirtd
    sudo systemctl restart libvirtd
    sleep 10 # wait some time for libvirtd service to restart
}
25+
26+
# (Re)create a clean minikube VM and point the local docker client at the
# docker daemon running inside it, so images we build land directly in the
# VM's image store.
setup_minikube_env() {

    # Check if Minikube is running; if so, tear it down first so every CI
    # run starts from a pristine VM.
    if minikube status > /dev/null 2>&1; then
        echo "Minikube is running"
        minikube stop
        minikube delete
    else
        echo "Minikube is not running"
    fi

    # Remove any cached minikube state from previous runs.
    rm -rf ~/.minikube
    minikube start --memory="4096" --cpus="2" --disk-size=10g --extra-disks=1 --driver kvm2
    # point Docker env to use docker daemon running on minikube
    eval $(minikube docker-env -p minikube)
}
42+
43+
# Build a local ceph image that embeds the orchestrator/rook mgr-module
# sources from this checkout, then tag it with the exact image name the
# upstream cluster-test.yaml references so the in-minikube docker uses our
# image instead of pulling from the registry.
build_ceph_image() {
    wget -q -O cluster-test.yaml https://raw.githubusercontent.com/rook/rook/master/deploy/examples/cluster-test.yaml
    # Extract the ceph image name currently referenced by the example manifest.
    CURR_CEPH_IMG=$(grep -E '^\s*image:\s+' cluster-test.yaml | sed 's/.*image: *\([^ ]*\)/\1/')

    cd ${CEPH_DEV_FOLDER}/src/pybind/mgr/rook/ci
    # Stage the module sources where the Dockerfile's COPY steps expect them.
    mkdir -p tmp_build/rook
    mkdir -p tmp_build/orchestrator
    cp ./../../orchestrator/*.py tmp_build/orchestrator
    cp ../*.py tmp_build/rook

    # we use the following tag to trick the Docker
    # running inside minikube so it uses this image instead
    # of pulling it from the registry
    docker build --tag ${LOCAL_CEPH_IMG} .
    docker tag ${LOCAL_CEPH_IMG} ${CURR_CEPH_IMG}

    # cleanup
    rm -rf tmp_build
    cd ${CEPH_DEV_FOLDER}
}
63+
64+
# Deploy the Rook operator plus a minimal test Ceph cluster, then expose
# the dashboard via an external HTTP service and start the toolbox pod
# (used later to run `ceph` commands from the tests).
create_rook_cluster() {
    local examples_url="https://raw.githubusercontent.com/rook/rook/master/deploy/examples"
    wget -q -O cluster-test.yaml ${examples_url}/cluster-test.yaml
    # Operator prerequisites, applied in dependency order.
    local manifest
    for manifest in crds.yaml common.yaml operator.yaml; do
        $KUBECTL create -f ${examples_url}/${manifest}
    done
    $KUBECTL create -f cluster-test.yaml
    $KUBECTL create -f ${examples_url}/dashboard-external-http.yaml
    $KUBECTL create -f ${examples_url}/toolbox.yaml
}
73+
74+
# Wait for the rook-ceph-operator deployment to roll out, then poll until
# the CephCluster CR reports a "Ready" phase.  Gives up after max_attempts
# polls; the non-zero return then aborts the script via `set -e`.
wait_for_rook_operator() {
    local max_attempts=10
    local sleep_interval=20
    local attempts=0
    $KUBECTL rollout status deployment rook-ceph-operator -n rook-ceph --timeout=180s
    # One-shot snapshot of the cluster phase, logged for debugging only.
    PHASE=$($KUBECTL get cephclusters.ceph.rook.io -n rook-ceph -o jsonpath='{.items[?(@.kind == "CephCluster")].status.phase}')
    echo "PHASE: $PHASE"
    # Re-query the phase each iteration until it contains "Ready".
    while ! $KUBECTL get cephclusters.ceph.rook.io -n rook-ceph -o jsonpath='{.items[?(@.kind == "CephCluster")].status.phase}' | grep -q "Ready"; do
        echo "Waiting for cluster to be ready..."
        sleep $sleep_interval
        attempts=$((attempts+1))
        if [ $attempts -ge $max_attempts ]; then
            echo "Maximum number of attempts ($max_attempts) reached. Exiting..."
            return 1
        fi
    done
}
91+
92+
# Wait for the toolbox deployment, then poll until the CephCluster reports
# HEALTH_OK.  Gives up after max_attempts polls; the non-zero return then
# aborts the script via `set -e`.
wait_for_ceph_cluster() {
    local max_attempts=10
    local sleep_interval=20
    local attempts=0
    $KUBECTL rollout status deployment rook-ceph-tools -n rook-ceph --timeout=30s
    # Poll the .status.ceph.health field of the CephCluster CR.
    while ! $KUBECTL get cephclusters.ceph.rook.io -n rook-ceph -o jsonpath='{.items[?(@.kind == "CephCluster")].status.ceph.health}' | grep -q "HEALTH_OK"; do
        echo "Waiting for Ceph cluster installed"
        sleep $sleep_interval
        attempts=$((attempts+1))
        if [ $attempts -ge $max_attempts ]; then
            echo "Maximum number of attempts ($max_attempts) reached. Exiting..."
            return 1
        fi
    done
    echo "Ceph cluster installed and running"
}
108+
109+
# Print the dashboard URL and admin password so the CI log shows how to
# reach the freshly-deployed cluster.
show_info() {
    # Dashboard admin password is stored base64-encoded in a k8s secret.
    DASHBOARD_PASSWORD=$($KUBECTL -n rook-ceph get secret rook-ceph-dashboard-password -o jsonpath="{['data']['password']}" | base64 --decode && echo)
    # Host IP of the node running the mgr pod + NodePort of the external
    # dashboard service form the reachable URL.
    IP_ADDR=$($KUBECTL get po --selector="app=rook-ceph-mgr" -n rook-ceph --output jsonpath='{.items[*].status.hostIP}')
    PORT="$($KUBECTL -n rook-ceph -o=jsonpath='{.spec.ports[?(@.name == "dashboard")].nodePort}' get services rook-ceph-mgr-dashboard-external-http)"
    BASE_URL="http://$IP_ADDR:$PORT"
    echo "==========================="
    echo "Ceph Dashboard: "
    echo " IP_ADDRESS: $BASE_URL"
    echo " PASSWORD: $DASHBOARD_PASSWORD"
    echo "==========================="
}
120+
121+
####################################################################
####################################################################

# Tear down the minikube VM on any failure; `set -E` above makes the ERR
# trap fire inside functions too.
trap 'on_error $? $LINENO' ERR

# Main sequence: provision the VM, build the test image, deploy rook,
# wait until the cluster is healthy, then print access info.
configure_libvirt
setup_minikube_env
build_ceph_image
create_rook_cluster
wait_for_rook_operator
wait_for_ceph_cluster
show_info

####################################################################
####################################################################
Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,12 @@
1+
Feature: Testing Rook orchestrator commands
2+
Ceph has been installed using the cluster CRD available in deploy/examples/cluster-test.yaml.
3+
4+
Scenario: Verify ceph cluster health
5+
When I run
6+
"""
7+
ceph health | grep HEALTH
8+
"""
9+
Then I get
10+
"""
11+
HEALTH_OK
12+
"""
Lines changed: 21 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,21 @@
1+
from behave import *
2+
from utils import *
3+
import re
4+
5+
@when("I run")
def run_step(context):
    """Execute the step's docstring commands and stash their output."""
    commands = context.text
    context.output = run_commands(commands)
8+
9+
@then("I get")
def verify_result_step(context):
    """Assert that the captured output matches the docstring exactly.

    A bare ``assert`` gives no diagnostic on failure, so attach both the
    expected and actual text to the assertion message.
    """
    print(f"Output is:\n{context.output}\n--------------\n")
    assert context.text == context.output, (
        f"Expected:\n{context.text}\nGot:\n{context.output}"
    )
13+
14+
@then("I get something like")
def verify_fuzzy_result_step(context):
    """Match output line-by-line against regex patterns in the docstring.

    Each expected line is a regex matched with ``re.match`` (anchored at the
    start of the line) against the corresponding output line.  Only the
    first min(#output, #expected) lines are compared.
    """
    output_lines = context.output.split("\n")
    expected_lines = context.text.split("\n")
    num_lines = min(len(output_lines), len(expected_lines))
    for n in range(num_lines):
        if not re.match(expected_lines[n], output_lines[n]):
            # The original bare `raise` had no active exception and would
            # itself fail with RuntimeError; raise a meaningful test
            # failure with the offending line instead.
            raise AssertionError(
                f"Line {n} mismatch: expected pattern {expected_lines[n]!r}, "
                f"got {output_lines[n]!r}"
            )
Lines changed: 29 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,29 @@
1+
import subprocess
2+
3+
# Command prefixes: `ceph ...` commands are executed inside the rook
# toolbox pod; everything else is passed straight to kubectl.
ROOK_CEPH_COMMAND = "minikube kubectl -- -n rook-ceph exec -it deploy/rook-ceph-tools -- "
CLUSTER_COMMAND = "minikube kubectl -- "
5+
6+
7+
def execute_command(command: str) -> str:
    """Run *command* through the shell and return its stdout.

    Errors are reported in-band: if launching the process fails, the
    returned string contains the error message instead of output (the
    best-effort contract the callers rely on).  A non-zero exit status
    still returns whatever stdout was produced.
    """
    # NOTE: shell=True is acceptable here because commands come from the
    # test fixtures, not from untrusted input.
    try:
        proc = subprocess.run(command, shell=True, capture_output=True, text=True)
    except (OSError, ValueError, subprocess.SubprocessError) as ex:
        # Narrowed from a blanket `except Exception` to the failures
        # subprocess.run can realistically raise.
        return f"Error executing command: {ex}"
    return proc.stdout
16+
17+
18+
def run_commands(commands: str) -> str:
    """Execute each non-empty line of *commands* and return combined stdout.

    Lines starting with "ceph" run inside the rook toolbox pod; all other
    lines go straight to kubectl.  Fixes two defects of the original:
    blank lines no longer execute the bare command prefix, and the outputs
    of all commands are accumulated instead of keeping only the last one.
    Leading/trailing newlines are stripped from the result so it can be
    compared against expected test output.
    """
    outputs = []
    for raw_cmd in commands.split("\n"):
        cmd = raw_cmd.strip()
        if not cmd:
            # Skip blank lines: otherwise we would execute the prefix alone.
            continue
        prefix = ROOK_CEPH_COMMAND if cmd.startswith("ceph") else CLUSTER_COMMAND
        outputs.append(execute_command(prefix + cmd))
    return "\n".join(outputs).strip("\n")

0 commit comments

Comments
 (0)