@@ -6,205 +6,68 @@ set -e
66# enable some more output
77set -x
88
9- # these variables need to be set
10- [ -n " ${GERRIT_HOST} " ]
11- [ -n " ${GERRIT_PROJECT} " ]
12- [ -n " ${GERRIT_REFSPEC} " ]
13-
14- # only use https for now
15- GIT_REPO=" https://${GERRIT_HOST} /${GERRIT_PROJECT} "
16-
17- # enable the Storage SIG Gluster and Ceph repositories
18- dnf -y install centos-release-ceph epel-release
19-
20- BUILDREQUIRES=" git bison cmake dbus-devel flex gcc-c++ krb5-devel libacl-devel libblkid-devel libcap-devel redhat-rpm-config rpm-build xfsprogs-devel lvm2"
21-
22- BUILDREQUIRES_EXTRA=" libnsl2-devel libnfsidmap-devel libwbclient-devel userspace-rcu-devel"
23-
24- # basic packages to install
25- case " ${CENTOS_VERSION} " in
26- 7)
27- yum install -y ${BUILDREQUIRES} ${BUILDREQUIRES_EXTRA} python2-devel
28- ;;
29- 8s)
30- yum install -y ${BUILDREQUIRES}
31- yum install --enablerepo=powertools -y ${BUILDREQUIRES_EXTRA}
32- yum install -y libcephfs-devel
33- ;;
34- 9s)
35- yum install -y ${BUILDREQUIRES}
36- yum install --enablerepo=crb -y ${BUILDREQUIRES_EXTRA}
37- yum install -y libcephfs-devel
38- ;;
39- esac
40-
41- git clone --depth=1 ${GIT_REPO}
42- cd $( basename " ${GERRIT_PROJECT} " )
43- git fetch origin ${GERRIT_REFSPEC} && git checkout FETCH_HEAD
44-
45- # update libntirpc
46- git submodule update --recursive --init || git submodule sync
47-
48- # cleanup old build dir
49- [ -d build ] && rm -rf build
50-
51- mkdir build
52- cd build
53-
54- ( cmake ../src -DCMAKE_BUILD_TYPE=Maintainer -DUSE_FSAL_GLUSTER=OFF -DUSE_FSAL_CEPH=ON -DUSE_FSAL_RGW=OFF -DUSE_DBUS=ON -DUSE_ADMIN_TOOLS=ON && make) || touch FAILED
55- make install
56-
57- # dont vote if the subject of the last change includes the word "WIP"
58- if ( git log --oneline -1 | grep -q -i -w ' WIP' )
59- then
60- echo " Change marked as WIP, not posting result to GerritHub."
61- touch WIP
# These variables must be provided by the CI environment; abort early
# (via 'set -e') with a clear test failure if any of them is missing.
[ -n "${SERVER}" ]
[ -n "${EXPORT}" ]
[ -n "${TEST_PARAMETERS}" ]

# install build and runtime dependencies
dnf -y install git gcc nfs-utils redhat-rpm-config krb5-devel python3-devel python3-gssapi python3-ply

# Fetch a fresh copy of the pynfs test suite.  The clone target is given
# explicitly so the script no longer depends on the current working
# directory happening to be /root.
# NOTE(review): the unauthenticated git:// protocol is used here; switch to
# https:// if the server supports it.
rm -rf /root/pynfs && git clone git://git.linux-nfs.org/projects/cdmackay/pynfs.git /root/pynfs

# Build pynfs; 'yes' auto-confirms any interactive prompts.  Under 'set -e'
# a build failure aborts the script on this line, so the status echoed
# below is always 0 and only serves as a progress marker in the job log.
cd /root/pynfs && yes | python3 setup.py build > /tmp/output_tempfile.txt
echo $?
20+
# Unique, timestamped log file for the NFSv4.0 run; the "40" infix keeps it
# distinct from the 4.1 log even when both runs start in the same second.
LOG_FILE40="/tmp/pynfs40-$(date +%s).log"
cd /root/pynfs/nfs4.0
# Expand SERVER/EXPORT/TEST_PARAMETERS/LOG_FILE40 *now* (double quotes):
# the command is later executed via 'sudo -u', which resets the
# environment, so deferring expansion to the inner shell would leave these
# variables empty.
COMMAND="./testserver.py ${SERVER}:${EXPORT} --verbose --maketree --showomit --rundeps all ganesha ${TEST_PARAMETERS} >> \"${LOG_FILE40}\""
TARGET_USER="testuser"

# Create the unprivileged test user if it does not already exist.
# '&>/dev/null' (no space) silences id; 'id … & > /dev/null' would
# background the command and make the condition unconditionally true.
if id "$TARGET_USER" &>/dev/null; then
    echo "User $TARGET_USER already exists."
else
    echo "Creating user $TARGET_USER ..."
    useradd -m "$TARGET_USER"
fi
6333
64- # If failure found during build, return the status and skip proceeding
65- # to ceph configuration
# Run the NFSv4.0 suite as the non-root user and capture its exit status.
# The '|| RETURN_CODE40=$?' guard keeps 'set -e' from aborting the script
# on test failure; without it the status could never be inspected below.
echo "Running command as $TARGET_USER ..."
RETURN_CODE40=0
sudo -u "$TARGET_USER" bash -c "$COMMAND" || RETURN_CODE40=$?

echo "pynfs 4.0 test output:"
cat "$LOG_FILE40"
6641
# Separate, uniquely named log for the NFSv4.1 run.  Embedding "41" in the
# name guarantees it never collides with the 4.0 log even when both
# 'date +%s' calls land in the same second (previously both logs could end
# up with the identical path).
LOG_FILE41="/tmp/pynfs41-$(date +%s).log"
cd /root/pynfs/nfs4.1
# Double quotes: expand the variables now, because 'sudo -u' resets the
# environment of the shell that will eventually run this command.
COMMAND="./testserver.py ${SERVER}:${EXPORT} all ganesha --verbose --maketree --showomit --rundeps >> \"${LOG_FILE41}\""
6745
68- # we accept different return values
69- # 0 - SUCCESS + VOTE
70- # 1 - FAILED + VOTE
71- # 10 - SUCCESS + REPORT ONLY (NO VOTE)
72- # 11 - FAILED + REPORT ONLY (NO VOTE)
73- RET=0
74- if [ -e FAILED ]
75- then
76- exit ${RET}
# Run the NFSv4.1 suite as the non-root user and capture its exit status.
# As with the 4.0 run, '|| RETURN_CODE41=$?' prevents 'set -e' from
# terminating the script before the result can be reported.
echo "Running command as $TARGET_USER ..."
RETURN_CODE41=0
sudo -u "$TARGET_USER" bash -c "$COMMAND" || RETURN_CODE41=$?

echo "pynfs 4.1 test output:"
cat "$LOG_FILE41"
53+
# Numeric comparison with -eq ('==' is not valid in POSIX test(1));
# quoting the variable keeps the expression well-formed even if it were
# ever empty.
if [ "$RETURN_CODE40" -eq 0 ]; then
    echo "All tests passed in pynfs 4.0 test suite"
fi
78- if [ -e WIP ]
79- then
80- RET=$[RET + 10]
81- exit ${RET}
# Same numeric/quoted comparison for the NFSv4.1 result.
if [ "$RETURN_CODE41" -eq 0 ]; then
    echo "All tests passed in pynfs 4.1 test suite"
fi
8361
84- # Create a virtual disk file (for OSD storage):
85- truncate -s 35G /tmp/ceph-disk.img
86- losetup -f /tmp/ceph-disk.img # Attaches as a loop device (e.g., /dev/loop0)
87-
88- pvcreate /dev/loop0
89- vgcreate ceph-vg /dev/loop0
90- lvcreate -L 10G -n osd1 ceph-vg
91- lvcreate -L 10G -n osd2 ceph-vg
92- lvcreate -L 10G -n osd3 ceph-vg
93-
94- # Install and configure ceph cluster
95- dnf install -y cephadm
96- cephadm add-repo --release squid
97- dnf install -y ceph
98- cephadm bootstrap --mon-ip $( hostname -I | awk ' {print $1}' ) --single-host-defaults --allow-fqdn-hostname
99- ceph auth get client.bootstrap-osd -o /var/lib/ceph/bootstrap-osd/ceph.keyring
100-
101- # Attach the virtual disks
102- ceph-volume lvm create --data /dev/ceph-vg/osd1
103- ceph-volume lvm create --data /dev/ceph-vg/osd2
104- ceph-volume lvm create --data /dev/ceph-vg/osd3
105-
106- # Verifying the disks
107- lvdisplay
108- lsblk
109- ceph orch device ls
110-
111- # Now auto assign these lvms to the osd's
112- ceph orch apply osd --all-available-devices
113-
114- # Wait for the osd's to be added
115-
116- echo " Waiting for at least one OSD to be ready..."
117- TIMEOUT=300
118- START_TIME=$( date +%s)
119- while true ; do
120- # Check if any OSD service exists and has at least one running OSD
121- OSD_STATUS=$( ceph orch ls --service-type osd --format json 2> /dev/null | \
122- jq -r ' .[0].status | select(.running != null) | .running >= 1' )
123-
124- # Check if the command succeeded and we got "true"
125- if [ " $OSD_STATUS " = " true" ]; then
126- echo " OSD is ready!"
127- break
128- fi
129-
130- # Check timeout if set
131- if [ " $TIMEOUT " -gt 0 ]; then
132- CURRENT_TIME=$( date +%s)
133- ELAPSED=$(( CURRENT_TIME - START_TIME))
134- if [ " $ELAPSED " -ge " $TIMEOUT " ]; then
135- echo " Timeout reached while waiting for OSD to be ready"
136- exit 1
137- fi
138- fi
139-
140- sleep 5
141- done
142-
143- # view the osd's
144- ceph osd tree
145- # Create a cephfs volume
146- ceph fs volume create cephfs
147-
148- # Create subvolumegroup
149- ceph fs subvolumegroup create cephfs ganeshagroup
150-
151- # Create subvolume
152- ceph fs subvolume create cephfs nfs_subvol --group_name ganeshagroup --namespace-isolated
153-
154- # Get subvolume path
155- CEPHFS_NAME=" cephfs"
156- SUBVOLUME_NAME=" nfs_subvol"
157- GROUP_NAME=" ganeshagroup"
158- SUBVOL_PATH=$( ceph fs subvolume getpath " $CEPHFS_NAME " " $SUBVOLUME_NAME " --group_name " $GROUP_NAME " 2> /dev/null)
159-
160- # Verify path was obtained
161- if [ -z " $SUBVOL_PATH " ]; then
162- echo " ERROR: Failed to get subvolume path."
# Report failures from either suite and exit non-zero.
# BUG FIX: the second condition originally re-tested RETURN_CODE40, so a
# run where only the 4.1 suite failed still exited successfully.
if [ "$RETURN_CODE40" -ne 0 ] || [ "$RETURN_CODE41" -ne 0 ]; then
    echo "pynfs 4.0 test suite failures:"
    echo "--------------------------"
    # '|| true': grep exits 1 when a log contains no FAILURE lines, which
    # would otherwise abort the script under 'set -e' before the second
    # log is reported.
    grep FAILURE "$LOG_FILE40" || true

    echo "pynfs 4.1 test suite failures:"
    echo "--------------------------"
    grep FAILURE "$LOG_FILE41" || true
    exit 1
fi
165- echo " Subvolume path: $SUBVOL_PATH "
166-
167- # create ganesha.conf file
168- echo " NFS_CORE_PARAM {
169- Enable_NLM = false;
170- Enable_RQUOTA = false;
171- Protocols = 4;
172- }
173-
174- EXPORT_DEFAULTS {
175- Access_Type = RW;
176- }
177- EXPORT {
178- Export_ID = 101;
179- Path = \" $SUBVOL_PATH \" ;
180- Pseudo = \" /nfs/cephfs\" ;
181- Protocols = 4;
182- Transports = TCP;
183- Access_Type = RW;
184- Squash = None;
185- FSAL {
186- Name = \" CEPH\" ;
187- }
188- }" > /etc/ganesha/ganesha.conf
189-
190- # View the ganesha conf file
191- cat /etc/ganesha/ganesha.conf
192-
193- mkdir -p /var/run/ganesha
194- chmod 755 /var/run/ganesha
195- chown root:root /var/run/ganesha
196-
197- # Creating backend recovery dir for nfs ganesha
198- mkdir -p /var/lib/nfs/ganesha
199- chmod 755 /var/lib/nfs/ganesha
200- chown root:root /var/lib/nfs/ganesha
201-
202- ganesha.nfsd -f /etc/ganesha/ganesha.conf -L /var/log/ganesha.log
203- if pgrep ganesha > /dev/null; then
204- echo " [OK] Service ganesha is running"
205- echo $( pgrep ganesha)
206- else
207- echo " [ERROR] Service ganesha is NOT running" >&2
208- exit 1
209- fi
21072
73+ exit 0
0 commit comments