66# enable some more output
77set -x
88
# These variables must be set by the CI job (Gerrit trigger).
# BUG FIX: a bare "[ -n ... ]" does not stop the script without set -e,
# and the stray spaces inside the old quotes made the test always true.
# ${VAR:?msg} aborts with a clear message when the variable is unset or empty.
: "${GERRIT_HOST:?GERRIT_HOST must be set}"
: "${GERRIT_PROJECT:?GERRIT_PROJECT must be set}"
: "${GERRIT_REFSPEC:?GERRIT_REFSPEC must be set}"

# Only use https for now.
GIT_REPO="https://${GERRIT_HOST}/${GERRIT_PROJECT}"
16-
# Enable the Storage SIG Ceph repository and EPEL.
dnf -y install centos-release-ceph epel-release

# Build dependencies common to all supported CentOS releases.
BUILDREQUIRES="git bison cmake dbus-devel flex gcc-c++ krb5-devel libacl-devel libblkid-devel libcap-devel redhat-rpm-config rpm-build xfsprogs-devel lvm2"

# Extra -devel dependencies that live in a different repo per release.
BUILDREQUIRES_EXTRA="libnsl2-devel libnfsidmap-devel libwbclient-devel userspace-rcu-devel"

# Basic packages to install, per CentOS release.
# BUG FIX: the case selector and patterns must not contain stray spaces,
# otherwise " 7 " would never match the pattern 7) and no branch would run.
# $BUILDREQUIRES is intentionally unquoted: word-splitting into one
# argument per package name is required here.
case "${CENTOS_VERSION}" in
    7)
        yum install -y ${BUILDREQUIRES} ${BUILDREQUIRES_EXTRA} python2-devel
        ;;
    8s)
        yum install -y ${BUILDREQUIRES}
        # The extra -devel packages live in PowerTools on CentOS 8 Stream.
        yum install --enablerepo=powertools -y ${BUILDREQUIRES_EXTRA}
        yum install -y libcephfs-devel
        ;;
    9s)
        yum install -y ${BUILDREQUIRES}
        # ...and in CRB on CentOS 9 Stream.
        yum install --enablerepo=crb -y ${BUILDREQUIRES_EXTRA}
        yum install -y libcephfs-devel
        ;;
esac
40-
# Clone the change under test. Each directory change is guarded so a
# failure here cannot let later steps run in the wrong directory.
git clone --depth=1 "${GIT_REPO}" || exit 1
cd "$(basename "${GERRIT_PROJECT}")" || exit 1
git fetch origin "${GERRIT_REFSPEC}" && git checkout FETCH_HEAD

# Update libntirpc.
# BUG FIX: sync must run before update ("sync" only rewrites submodule
# URLs, it fetches nothing); the old "update || sync" order never retried
# the update after a sync, so a URL change left the submodule stale.
git submodule sync --recursive
git submodule update --recursive --init

# Clean up any old build dir.
[ -d build ] && rm -rf build

mkdir build
cd build || exit 1

# Build; on failure leave a FAILED marker for the voting logic below.
(cmake ../src -DCMAKE_BUILD_TYPE=Maintainer -DUSE_FSAL_GLUSTER=OFF -DUSE_FSAL_CEPH=ON -DUSE_FSAL_RGW=OFF -DUSE_DBUS=ON -DUSE_ADMIN_TOOLS=ON && make) || touch FAILED
# BUG FIX: only install when the build actually succeeded.
[ -e FAILED ] || make install
56-
# Don't vote if the subject of the last change includes the word "WIP".
if git log --oneline -1 | grep -q -i -w 'WIP'
then
    echo "Change marked as WIP, not posting result to GerritHub."
    touch WIP
fi

# If a failure was found during the build, return the status and skip
# proceeding to ceph configuration.
#
# We accept different return values:
#  0 - SUCCESS + VOTE
#  1 - FAILED + VOTE
# 10 - SUCCESS + REPORT ONLY (NO VOTE)
# 11 - FAILED + REPORT ONLY (NO VOTE)
RET=0
# BUG FIX: the original exited with RET=0 (SUCCESS + VOTE) when the build
# FAILED; per the table above a failed build must report 1 (or 11 when
# combined with WIP). Also replaced the deprecated $[ ] arithmetic form.
if [ -e FAILED ]
then
    RET=$((RET + 1))
fi
if [ -e WIP ]
then
    RET=$((RET + 10))
fi
# Stop here (skipping ceph configuration) on build failure or WIP change;
# otherwise fall through and continue with the cluster setup.
if [ "${RET}" -ne 0 ]
then
    exit "${RET}"
fi
83-
# Create a virtual disk file (for OSD storage).
truncate -s 35G /tmp/ceph-disk.img
# BUG FIX: the first free loop device is not guaranteed to be /dev/loop0
# (another job may hold it); capture whatever device losetup actually
# attached with --show instead of hardcoding the name.
LOOP_DEV=$(losetup -f --show /tmp/ceph-disk.img) || exit 1

# Carve three 10G logical volumes out of the loop device, one per OSD.
pvcreate "${LOOP_DEV}"
vgcreate ceph-vg "${LOOP_DEV}"
lvcreate -L 10G -n osd1 ceph-vg
lvcreate -L 10G -n osd2 ceph-vg
lvcreate -L 10G -n osd3 ceph-vg
93-
# Install cephadm, enable the squid release repo, and bootstrap a
# single-host ceph cluster on this node's primary IP.
dnf install -y cephadm
cephadm add-repo --release squid
dnf install -y ceph

NODE_IP=$(hostname -I | awk '{print $1}')
cephadm bootstrap --mon-ip "${NODE_IP}" --single-host-defaults --allow-fqdn-hostname
ceph auth get client.bootstrap-osd -o /var/lib/ceph/bootstrap-osd/ceph.keyring

# Attach the three virtual disks as OSD backing stores.
for lv in osd1 osd2 osd3; do
    ceph-volume lvm create --data "/dev/ceph-vg/${lv}"
done

# Verify the disks.
lvdisplay
lsblk
ceph orch device ls

# Now auto-assign these LVs to the OSDs.
ceph orch apply osd --all-available-devices
113-
# Wait for the OSDs to be added: poll the orchestrator until the osd
# service reports at least one running daemon, or give up after TIMEOUT.
echo "Waiting for at least one OSD to be ready..."
TIMEOUT=300
START_TIME=$(date +%s)
while true; do
    # jq prints "true" when the osd service exists and .running >= 1.
    osd_ready=$(ceph orch ls --service-type osd --format json 2>/dev/null \
        | jq -r '.[0].status | select(.running != null) | .running >= 1')

    if [ "${osd_ready}" = "true" ]; then
        echo "OSD is ready!"
        break
    fi

    # Enforce the timeout (a TIMEOUT of 0 means wait forever).
    if [ "${TIMEOUT}" -gt 0 ]; then
        elapsed=$(( $(date +%s) - START_TIME ))
        if [ "${elapsed}" -ge "${TIMEOUT}" ]; then
            echo "Timeout reached while waiting for OSD to be ready"
            exit 1
        fi
    fi

    sleep 5
done
142-
# View the OSD tree.
ceph osd tree

# Names for the CephFS volume, subvolume group, and isolated subvolume
# that ganesha will export.
CEPHFS_NAME="cephfs"
SUBVOLUME_NAME="nfs_subvol"
GROUP_NAME="ganeshagroup"

# Create the volume, the subvolume group, and a namespace-isolated subvolume.
ceph fs volume create "${CEPHFS_NAME}"
ceph fs subvolumegroup create "${CEPHFS_NAME}" "${GROUP_NAME}"
ceph fs subvolume create "${CEPHFS_NAME}" "${SUBVOLUME_NAME}" --group_name "${GROUP_NAME}" --namespace-isolated

# Resolve the subvolume's mount path inside the CephFS namespace.
SUBVOL_PATH=$(ceph fs subvolume getpath "${CEPHFS_NAME}" "${SUBVOLUME_NAME}" --group_name "${GROUP_NAME}" 2>/dev/null)

# Verify the path was obtained.
if [ -z "${SUBVOL_PATH}" ]; then
    echo "ERROR: Failed to get subvolume path."
    exit 1
fi
echo "Subvolume path: ${SUBVOL_PATH}"
166-
# Create the ganesha.conf file: a single NFSv4 FSAL_CEPH export of the
# subvolume resolved above.
# BUG FIX: use a here-document instead of echo with backslash-escaped
# quotes; the old version also embedded stray spaces inside the quoted
# config values (e.g. Path = " /volumes/... "), producing a broken path.
cat > /etc/ganesha/ganesha.conf <<EOF
NFS_CORE_PARAM {
	Enable_NLM = false;
	Enable_RQUOTA = false;
	Protocols = 4;
}

EXPORT_DEFAULTS {
	Access_Type = RW;
}

EXPORT {
	Export_ID = 101;
	Path = "$SUBVOL_PATH";
	Pseudo = "/nfs/cephfs";
	Protocols = 4;
	Transports = TCP;
	Access_Type = RW;
	Squash = None;
	FSAL {
		Name = "CEPH";
	}
}
EOF

# View the ganesha conf file.
cat /etc/ganesha/ganesha.conf
192-
# Runtime dir and backend recovery dir for nfs-ganesha, both root-owned.
for dir in /var/run/ganesha /var/lib/nfs/ganesha; do
    mkdir -p "${dir}"
    chmod 755 "${dir}"
    chown root:root "${dir}"
done

# Start ganesha with the generated config and verify it came up.
ganesha.nfsd -f /etc/ganesha/ganesha.conf -L /var/log/ganesha.log
if pgrep ganesha > /dev/null; then
    echo "[OK] Service ganesha is running"
    # BUG FIX: 'echo $(pgrep ganesha)' flattened multiple PIDs onto one
    # line; print pgrep's output directly instead.
    pgrep ganesha
else
    echo "[ERROR] Service ganesha is NOT running" >&2
    exit 1
fi
# Run basic ceph deployment and ganesha service.
# BUG FIX: quote $WORKSPACE (Jenkins workspaces can contain spaces) and
# stop if the deployment script fails — the Cthon runs below are
# meaningless without a working cluster.
sh "${WORKSPACE}/ci-tests/ceph/basic-ceph.sh" || exit 1

# Run Cthon post successful cluster creation.
# NOTE(review): cthon04 is assumed to have been cloned by an earlier
# step not visible here — confirm.
cd cthon04 || exit 1
# RUN CTHON for v3.
# (Reconstructed from the current version of this section; the old
# commented-out duplicate of the same code has been dropped.)
mkdir -p /mnt/nfs_ceph_v3
if mount -t nfs -o vers=3 "$(hostname -I | awk '{print $1}')":/nfs/cephfs /mnt/nfs_ceph_v3; then
    echo "NFS mount successful!"
    if mountpoint -q /mnt/nfs_ceph_v3; then
        echo "Verification: NFS is properly mounted at /mnt/nfs_ceph_v3"
        echo "Mounted NFS details:"
        mount | grep /mnt/nfs_ceph_v3
    else
        echo "ERROR: Mount command succeeded but verification failed!" >&2
        exit 1
    fi
else
    echo "ERROR: Failed to mount NFS share!" >&2
    touch FAILED
    exit 1
fi
# Run Cthon.
./server -a -p /nfs/cephfs -m /mnt/nfs_ceph_v3 "$(hostname -I | awk '{print $1}')"
./test "$(hostname -I | awk '{print $1}')":/nfs/cephfs /mnt/nfs_ceph_v3
# Run CTHON for v4.0.
# NOTE(review): the middle of this section was elided in the source
# being reviewed; it is reconstructed to match the identical v3/v4.2
# mount-and-verify pattern — confirm against the full file.
mkdir -p /mnt/nfs_ceph_v4
if mount -t nfs -o vers=4.0 "$(hostname -I | awk '{print $1}')":/nfs/cephfs /mnt/nfs_ceph_v4; then
    echo "NFS mount successful!"
    if mountpoint -q /mnt/nfs_ceph_v4; then
        echo "Verification: NFS is properly mounted at /mnt/nfs_ceph_v4"
        echo "Mounted NFS details:"
        mount | grep /mnt/nfs_ceph_v4
    else
        echo "ERROR: Mount command succeeded but verification failed!" >&2
        exit 1
    fi
else
    echo "ERROR: Failed to mount NFS share!" >&2
    touch FAILED
    exit 1
fi
# Run Cthon.
if [ "${CONCURRENT_JOBS}" == "True" ]; then
    # NOTE(review): the non-concurrent branch passes -p before the export
    # path; confirm whether -p was intended here as well.
    ./server -c 100000 /nfs/cephfs -m /mnt/nfs_ceph_v4 "$(hostname -I | awk '{print $1}')"
else
    ./server -a -p /nfs/cephfs -m /mnt/nfs_ceph_v4 "$(hostname -I | awk '{print $1}')"
fi
27981
280-
281-
# Run CTHON for v4.2.
# (Reconstructed from the current version of this section; the old
# v4.1 variant of the same lines has been dropped.)
mkdir -p /mnt/nfs_ceph_v42
if mount -t nfs -o vers=4.2 "$(hostname -I | awk '{print $1}')":/nfs/cephfs /mnt/nfs_ceph_v42; then
    echo "NFS mount successful!"
    if mountpoint -q /mnt/nfs_ceph_v42; then
        echo "Verification: NFS is properly mounted at /mnt/nfs_ceph_v42"
        echo "Mounted NFS details:"
        mount | grep /mnt/nfs_ceph_v42
    else
        echo "ERROR: Mount command succeeded but verification failed!" >&2
        exit 1
    fi
else
    echo "ERROR: Failed to mount NFS share!" >&2
    touch FAILED
    exit 1
fi
# Run Cthon against the v4.2 mount.
if [ "${CONCURRENT_JOBS}" == "True" ]; then
    ./server -c 100000 /nfs/cephfs -m /mnt/nfs_ceph_v42 "$(hostname -I | awk '{print $1}')"
else
    # BUG FIX: this branch ran against /mnt/nfs_ceph_v4 even though the
    # section above mounts /mnt/nfs_ceph_v42 — point it at the v4.2
    # mountpoint. NOTE(review): confirm the old target was not intentional.
    ./server -a -p /nfs/cephfs -m /mnt/nfs_ceph_v42 "$(hostname -I | awk '{print $1}')"
fi

exit 0
# (end of script — removed stray "0 commit comments" web-page residue,
# which would otherwise execute as a bogus command)