Commit ea624e4

Author: Neeraj Pratap Singh (committed)
qa: add test for subvolume rm with retained snapshots when cluster is full
Fixes: https://tracker.ceph.com/issues/67330
Signed-off-by: Neeraj Pratap Singh <[email protected]>
Conflicts:
	qa/suites/fs/full/tasks/mgr-osd-full.yaml
1 parent ba199cf commit ea624e4

File tree

2 files changed, +72 -0 lines changed

qa/suites/fs/full/tasks/mgr-osd-full.yaml

Lines changed: 5 additions & 0 deletions
@@ -34,3 +34,8 @@ tasks:
     clients:
       client.0:
         - fs/full/subvolume_ls.sh
+- workunit:
+    cleanup: true
+    clients:
+      client.0:
+        - fs/full/subvol_rm_retained_snap_when_cluster_is_full.sh
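
For reference, a workunit like this can also be run by hand against a test cluster. A minimal sketch, assuming a checkout of the ceph repo and a CephFS client mount whose path is exported as CEPH_MNT (the mount point below is hypothetical; teuthology normally provides this environment):

# Hypothetical manual run, outside of teuthology.
export CEPH_MNT=/mnt/cephfs   # assumed CephFS mount point
bash qa/workunits/fs/full/subvol_rm_retained_snap_when_cluster_is_full.sh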
qa/workunits/fs/full/subvol_rm_retained_snap_when_cluster_is_full.sh

Lines changed: 67 additions & 0 deletions
@@ -0,0 +1,67 @@
#!/usr/bin/env bash
set -ex

# Test that the command 'ceph fs subvolume rm --retain-snapshots' fails when
# the OSD is full.
#
# A subvolume is created on a cluster with an OSD of size 2GB, and a 1GB file
# is written to the subvolume. The full ratio is then lowered so that the
# usable OSD capacity drops below 500MB and the OSD is treated as full. The
# subvolume is then removed with its snapshots retained, which is expected
# to fail.

expect_failure() {
    if "$@"; then return 1; else return 0; fi
}
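
# expect_failure inverts the exit status of the wrapped command, so under
# `set -e` the script aborts if a command that is expected to fail succeeds:
#   expect_failure false   # returns 0: the command failed, as expected
#   expect_failure true    # returns 1: aborts the script under `set -e`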

ceph fs subvolume create cephfs sub_0
subvol_path=$(ceph fs subvolume getpath cephfs sub_0 2>/dev/null)

echo "Printing disk usage for the host as well as Ceph before writing to the subvolume"
df -h
ceph osd df

# Write ~1GB (1000 x 1MB) of random data to the subvolume.
sudo dd if=/dev/urandom of=$CEPH_MNT$subvol_path/1GB_file-1 status=progress bs=1M count=1000

ceph osd set-full-ratio 0.2
ceph osd set-nearfull-ratio 0.16
ceph osd set-backfillfull-ratio 0.18
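
# Rough arithmetic behind the ratios (assuming the 2GB OSD described above):
# full-ratio 0.2 caps usable capacity at about 0.2 * 2GB = 400MB, well below
# the 1GB already written, so the cluster should soon report OSD_FULL.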

# Wait (up to 30 seconds) for the cluster to report the OSD as full.
timeout=30
while [ $timeout -gt 0 ]
do
    health=$(ceph health detail)
    [[ $health = *"OSD_FULL"* ]] && echo "OSD is full" && break
    echo "Waiting for OSD to be full: $timeout"
    sleep 1
    let "timeout-=1"
done

echo "Printing disk usage for the host as well as Ceph after the OSD ratios have been set"
df -h
ceph osd df

# Take a snapshot.
ceph fs subvolume snapshot create cephfs sub_0 snap_0

# Deleting the subvolume while retaining snapshots must fail on a full cluster.
expect_failure ceph fs subvolume rm cephfs sub_0 --retain-snapshots

# Validate that the subvolume was not deleted.
ceph fs subvolume info cephfs sub_0

# Validate that the subvolume's config file was not truncated and its GLOBAL
# section still exists.
sudo grep "GLOBAL" $CEPH_MNT/volumes/_nogroup/sub_0/.meta

# Hard cleanup: remove the snapshot and the subvolume directly on the mount.
sudo rmdir $CEPH_MNT/volumes/_nogroup/sub_0/.snap/snap_0
sudo rm -rf $CEPH_MNT/volumes/_nogroup/sub_0

# Reset the ratios to their original values for the sake of the remaining tests.
ceph osd set-full-ratio 0.95
ceph osd set-nearfull-ratio 0.95
ceph osd set-backfillfull-ratio 0.95

echo "Printing disk usage for the host as well as Ceph now that the test has finished"
df -h
ceph osd df

echo OK
