Skip to content

Commit cb33dfa

Browse files
Merge pull request #108 from pranavprakash20/add_ltp_tests
[LTP] Add support for LTP tests
2 parents 5258886 + a08f0dc commit cb33dfa

File tree

4 files changed

+282
-229
lines changed

4 files changed

+282
-229
lines changed

build_scripts/ltp/run-ltp-tests.sh

Lines changed: 42 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,42 @@
#!/bin/sh
#
# Client script for running the LTP (Linux Test Project) "fs" suite
# against NFS mounts of a single export, once per protocol version
# (v3, v4.0 and v4.2).
#
# Environment variables used:
# - SERVER: hostname or IP-address of the NFS-server
# - EXPORT: NFS-export to test (should start with "/")
#
# Must run as root (installs packages and mounts filesystems).

echo "Client Script for executing LTP"

# Abort on the first failing command and on unset variables.
# Without -e the env-var checks below would be silent no-ops.
set -eu

# enable some more output
set -x

# these variables need to be set; ':?' aborts with a message if not
: "${SERVER:?SERVER must be set to the NFS server hostname or IP}"
: "${EXPORT:?EXPORT must be set to the NFS export path}"

# install build and runtime dependencies
dnf install -y git gcc gcc-c++ make automake autoconf pkgconf pkgconf-pkg-config libtool bison flex perl perl-Time-HiRes python3 wget tar libaio-devel net-tools nfs-utils

# fetch, build and install LTP (default prefix is /opt/ltp);
# each step on its own line so 'set -e' stops on the first failure
git clone https://github.com/linux-test-project/ltp.git
cd ltp
make autotools
./configure
make -j"$(nproc)"
make install
cd ..

# Mount the export once per NFS version under test.
# v3 mount
mkdir -p /mnt/nfsv3
mount -t nfs -o vers=3 "${SERVER}:${EXPORT}" /mnt/nfsv3

# v4 mount
mkdir -p /mnt/nfsv4
mount -t nfs -o vers=4 "${SERVER}:${EXPORT}" /mnt/nfsv4

# v4.2 mount
mkdir -p /mnt/nfsv42
mount -t nfs -o vers=4.2 "${SERVER}:${EXPORT}" /mnt/nfsv42

# Run the LTP fs suite against each mount (-d test dir, -o test output
# log, -l run/result log, -p human-readable output). The script already
# requires root, so the previous per-command 'sudo' was redundant.
cd /opt/ltp

# Run ltp on v3 mount
./runltp -d /mnt/nfsv3 -f fs -o /tmp/ltp_output_nfsv3.log -l /tmp/ltp_run_nfsv3.log -p

# Run ltp on v4.2 mount
./runltp -d /mnt/nfsv42 -f fs -o /tmp/ltp_output_nfsv42.log -l /tmp/ltp_run_nfsv42.log -p

# Run ltp on v4 mount
./runltp -d /mnt/nfsv4 -f fs -o /tmp/ltp_output_nfsv4.log -l /tmp/ltp_run_nfsv4.log -p

ceph/basic-ceph-cthon.sh

Lines changed: 29 additions & 229 deletions
Original file line numberDiff line numberDiff line change
@@ -6,207 +6,8 @@ set -e
66
# enable some more output
77
set -x
88

9-
# these variables need to be set
10-
[ -n "${GERRIT_HOST}" ]
11-
[ -n "${GERRIT_PROJECT}" ]
12-
[ -n "${GERRIT_REFSPEC}" ]
13-
14-
# only use https for now
15-
GIT_REPO="https://${GERRIT_HOST}/${GERRIT_PROJECT}"
16-
17-
# enable the Storage SIG Gluster and Ceph repositories
18-
dnf -y install centos-release-ceph epel-release
19-
20-
BUILDREQUIRES="git bison cmake dbus-devel flex gcc-c++ krb5-devel libacl-devel libblkid-devel libcap-devel redhat-rpm-config rpm-build xfsprogs-devel lvm2"
21-
22-
BUILDREQUIRES_EXTRA="libnsl2-devel libnfsidmap-devel libwbclient-devel userspace-rcu-devel"
23-
24-
# basic packages to install
25-
case "${CENTOS_VERSION}" in
26-
7)
27-
yum install -y ${BUILDREQUIRES} ${BUILDREQUIRES_EXTRA} python2-devel
28-
;;
29-
8s)
30-
yum install -y ${BUILDREQUIRES}
31-
yum install --enablerepo=powertools -y ${BUILDREQUIRES_EXTRA}
32-
yum install -y libcephfs-devel
33-
;;
34-
9s)
35-
yum install -y ${BUILDREQUIRES}
36-
yum install --enablerepo=crb -y ${BUILDREQUIRES_EXTRA}
37-
yum install -y libcephfs-devel
38-
;;
39-
esac
40-
41-
git clone --depth=1 ${GIT_REPO}
42-
cd $(basename "${GERRIT_PROJECT}")
43-
git fetch origin ${GERRIT_REFSPEC} && git checkout FETCH_HEAD
44-
45-
# update libntirpc
46-
git submodule update --recursive --init || git submodule sync
47-
48-
# cleanup old build dir
49-
[ -d build ] && rm -rf build
50-
51-
mkdir build
52-
cd build
53-
54-
( cmake ../src -DCMAKE_BUILD_TYPE=Maintainer -DUSE_FSAL_GLUSTER=OFF -DUSE_FSAL_CEPH=ON -DUSE_FSAL_RGW=OFF -DUSE_DBUS=ON -DUSE_ADMIN_TOOLS=ON && make) || touch FAILED
55-
make install
56-
57-
# dont vote if the subject of the last change includes the word "WIP"
58-
if ( git log --oneline -1 | grep -q -i -w 'WIP' )
59-
then
60-
echo "Change marked as WIP, not posting result to GerritHub."
61-
touch WIP
62-
fi
63-
64-
# If failure found during build, return the status and skip proceeding
65-
# to ceph configuration
66-
67-
68-
# we accept different return values
69-
# 0 - SUCCESS + VOTE
70-
# 1 - FAILED + VOTE
71-
# 10 - SUCCESS + REPORT ONLY (NO VOTE)
72-
# 11 - FAILED + REPORT ONLY (NO VOTE)
73-
RET=0
74-
if [ -e FAILED ]
75-
then
76-
exit ${RET}
77-
fi
78-
if [ -e WIP ]
79-
then
80-
RET=$[RET + 10]
81-
exit ${RET}
82-
fi
83-
84-
# Create a virtual disk file (for OSD storage):
85-
truncate -s 35G /tmp/ceph-disk.img
86-
losetup -f /tmp/ceph-disk.img # Attaches as a loop device (e.g., /dev/loop0)
87-
88-
pvcreate /dev/loop0
89-
vgcreate ceph-vg /dev/loop0
90-
lvcreate -L 10G -n osd1 ceph-vg
91-
lvcreate -L 10G -n osd2 ceph-vg
92-
lvcreate -L 10G -n osd3 ceph-vg
93-
94-
# Install and configure ceph cluster
95-
dnf install -y cephadm
96-
cephadm add-repo --release squid
97-
dnf install -y ceph
98-
cephadm bootstrap --mon-ip $(hostname -I | awk '{print $1}') --single-host-defaults --allow-fqdn-hostname
99-
ceph auth get client.bootstrap-osd -o /var/lib/ceph/bootstrap-osd/ceph.keyring
100-
101-
# Attach the virtual disks
102-
ceph-volume lvm create --data /dev/ceph-vg/osd1
103-
ceph-volume lvm create --data /dev/ceph-vg/osd2
104-
ceph-volume lvm create --data /dev/ceph-vg/osd3
105-
106-
# Verifying the disks
107-
lvdisplay
108-
lsblk
109-
ceph orch device ls
110-
111-
# Now auto assign these lvms to the osd's
112-
ceph orch apply osd --all-available-devices
113-
114-
# Wait for the osd's to be added
115-
116-
echo "Waiting for at least one OSD to be ready..."
117-
TIMEOUT=300
118-
START_TIME=$(date +%s)
119-
while true; do
120-
# Check if any OSD service exists and has at least one running OSD
121-
OSD_STATUS=$(ceph orch ls --service-type osd --format json 2>/dev/null | \
122-
jq -r '.[0].status | select(.running != null) | .running >= 1')
123-
124-
# Check if the command succeeded and we got "true"
125-
if [ "$OSD_STATUS" = "true" ]; then
126-
echo "OSD is ready!"
127-
break
128-
fi
129-
130-
# Check timeout if set
131-
if [ "$TIMEOUT" -gt 0 ]; then
132-
CURRENT_TIME=$(date +%s)
133-
ELAPSED=$((CURRENT_TIME - START_TIME))
134-
if [ "$ELAPSED" -ge "$TIMEOUT" ]; then
135-
echo "Timeout reached while waiting for OSD to be ready"
136-
exit 1
137-
fi
138-
fi
139-
140-
sleep 5
141-
done
142-
143-
# view the osd's
144-
ceph osd tree
145-
# Create a cephfs volume
146-
ceph fs volume create cephfs
147-
148-
# Create subvolumegroup
149-
ceph fs subvolumegroup create cephfs ganeshagroup
150-
151-
# Create subvolume
152-
ceph fs subvolume create cephfs nfs_subvol --group_name ganeshagroup --namespace-isolated
153-
154-
# Get subvolume path
155-
CEPHFS_NAME="cephfs"
156-
SUBVOLUME_NAME="nfs_subvol"
157-
GROUP_NAME="ganeshagroup"
158-
SUBVOL_PATH=$(ceph fs subvolume getpath "$CEPHFS_NAME" "$SUBVOLUME_NAME" --group_name "$GROUP_NAME" 2>/dev/null)
159-
160-
# Verify path was obtained
161-
if [ -z "$SUBVOL_PATH" ]; then
162-
echo "ERROR: Failed to get subvolume path."
163-
exit 1
164-
fi
165-
echo "Subvolume path: $SUBVOL_PATH"
166-
167-
# create ganesha.conf file
168-
echo "NFS_CORE_PARAM {
169-
Enable_NLM = false;
170-
Enable_RQUOTA = false;
171-
Protocols = 4;
172-
}
173-
174-
EXPORT_DEFAULTS {
175-
Access_Type = RW;
176-
}
177-
EXPORT {
178-
Export_ID = 101;
179-
Path = \"$SUBVOL_PATH\";
180-
Pseudo = \"/nfs/cephfs\";
181-
Protocols = 4;
182-
Transports = TCP;
183-
Access_Type = RW;
184-
Squash = None;
185-
FSAL {
186-
Name = \"CEPH\";
187-
}
188-
}" > /etc/ganesha/ganesha.conf
189-
190-
# View the ganesha conf file
191-
cat /etc/ganesha/ganesha.conf
192-
193-
mkdir -p /var/run/ganesha
194-
chmod 755 /var/run/ganesha
195-
chown root:root /var/run/ganesha
196-
197-
# Creating backend recovery dir for nfs ganesha
198-
mkdir -p /var/lib/nfs/ganesha
199-
chmod 755 /var/lib/nfs/ganesha
200-
chown root:root /var/lib/nfs/ganesha
201-
202-
ganesha.nfsd -f /etc/ganesha/ganesha.conf -L /var/log/ganesha.log
203-
if pgrep ganesha >/dev/null; then
204-
echo "[OK] Service ganesha is running"
205-
echo $(pgrep ganesha)
206-
else
207-
echo "[ERROR] Service ganesha is NOT running" >&2
208-
exit 1
209-
fi
9+
# Run basic ceph deployment and ganesha service
10+
sh $WORKSPACE/ci-tests/ceph/basic-ceph.sh
21011

21112
# Run Cthon post successful cluster creation
21213

@@ -232,25 +33,25 @@ cd cthon04
23233

23334

23435
# RUN CTHON for v3
235-
#mkdir -p /mnt/nfs_ceph_v3
236-
#if mount -t nfs -o vers=3 $(hostname -I | awk '{print $1}'):/nfs/cephfs /mnt/nfs_ceph_v3; then
237-
# echo "NFS mount successful!"
238-
# if mountpoint -q /mnt/nfs_ceph_v3; then
239-
# echo "Verification: NFS is properly mounted at /mnt/nfs_ceph_v3"
240-
# echo "Mounted NFS details:"
241-
# mount | grep /mnt/nfs_ceph_v3
242-
# else
243-
# echo "ERROR: Mount command succeeded but verification failed!" >&2
244-
# exit 1
245-
# fi
246-
#else
247-
# echo "ERROR: Failed to mount NFS share!" >&2
248-
# touch FAILED
249-
# exit 1
250-
#fi
251-
## Run Cthon
252-
#./server -a -p /nfs/cephfs -m /mnt/nfs_ceph_v3 $(hostname -I | awk '{print $1}')
253-
36+
mkdir -p /mnt/nfs_ceph_v3
37+
if mount -t nfs -o vers=3 $(hostname -I | awk '{print $1}'):/nfs/cephfs /mnt/nfs_ceph_v3; then
38+
echo "NFS mount successful!"
39+
if mountpoint -q /mnt/nfs_ceph_v3; then
40+
echo "Verification: NFS is properly mounted at /mnt/nfs_ceph_v3"
41+
echo "Mounted NFS details:"
42+
mount | grep /mnt/nfs_ceph_v3
43+
else
44+
echo "ERROR: Mount command succeeded but verification failed!" >&2
45+
exit 1
46+
fi
47+
else
48+
echo "ERROR: Failed to mount NFS share!" >&2
49+
touch FAILED
50+
exit 1
51+
fi
52+
# Run Cthon
53+
./server -a -p /nfs/cephfs -m /mnt/nfs_ceph_v3 $(hostname -I | awk '{print $1}')
54+
./test $(hostname -I | awk '{print $1}'):/nfs/cephfs /mnt/nfs_ceph_v3
25455

25556
# Run CTHON for v4.0
25657
mkdir -p /mnt/nfs_ceph_v4
@@ -271,22 +72,21 @@ else
27172
exit 1
27273
fi
27374
# Run Cthon
75+
27476
if [ "${CONCURRENT_JOBS}" == "True" ]; then
27577
./server -c 100000 /nfs/cephfs -m /mnt/nfs_ceph_v4 $(hostname -I | awk '{print $1}')
27678
else
27779
./server -a -p /nfs/cephfs -m /mnt/nfs_ceph_v4 $(hostname -I | awk '{print $1}')
27880
fi
27981

280-
281-
282-
# Run CTHON for v4.1
283-
mkdir -p /mnt/nfs_ceph_v41
284-
if mount -t nfs -o vers=4.1 $(hostname -I | awk '{print $1}'):/nfs/cephfs /mnt/nfs_ceph_v41; then
82+
# Run CTHON for v4.2
83+
mkdir -p /mnt/nfs_ceph_v42
84+
if mount -t nfs -o vers=4.2 $(hostname -I | awk '{print $1}'):/nfs/cephfs /mnt/nfs_ceph_v42; then
28585
echo "NFS mount successful!"
286-
if mountpoint -q /mnt/nfs_ceph_v41; then
287-
echo "Verification: NFS is properly mounted at /mnt/nfs_ceph_v41"
86+
if mountpoint -q /mnt/nfs_ceph_v42; then
87+
echo "Verification: NFS is properly mounted at /mnt/nfs_ceph_v42"
28888
echo "Mounted NFS details:"
289-
mount | grep /mnt/nfs_ceph_v41
89+
mount | grep /mnt/nfs_ceph_v42
29090
else
29191
echo "ERROR: Mount command succeeded but verification failed!" >&2
29292
exit 1
@@ -303,5 +103,5 @@ if [ "${CONCURRENT_JOBS}" == "True" ]; then
303103
else
304104
./server -a -p /nfs/cephfs -m /mnt/nfs_ceph_v4 $(hostname -I | awk '{print $1}')
305105
fi
306-
exit 0
307106

107+
exit 0

0 commit comments

Comments
 (0)