
Commit 9f74f85

qa/cephfs: test ongoing clones counter in CloneProgressReporter
Test that CloneProgressReporter counts the number of ongoing clones correctly.

Signed-off-by: Rishabh Dave <[email protected]>
1 parent 62d29c6 commit 9f74f85
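
The test added here reads the "progress_events" dict from "ceph status" output (via its get_pevs_from_ceph_status() helper) and checks the progress-bar messages. As a rough illustration only, not code from this commit, the sketch below shows how those events can be fetched directly with the ceph CLI; the exact JSON layout is an assumption based on what the test asserts on:

import json
import subprocess

def get_progress_events():
    # "ceph -s -f json" prints the cluster status as JSON; the
    # "progress_events" member (assumed here) maps event IDs to dicts
    # carrying "message" and "progress" fields, which is what the new
    # test inspects for the "ongoing clones" bars.
    out = subprocess.check_output(['ceph', '-s', '-f', 'json'])
    return json.loads(out).get('progress_events', {})

for ev_id, ev in get_progress_events().items():
    print(ev_id, ev.get('message'), ev.get('progress'))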

2 files changed: 72 additions, 0 deletions


qa/suites/fs/volumes/tasks/volumes/test/clone-progress.yaml

Lines changed: 1 addition & 0 deletions

@@ -3,3 +3,4 @@ tasks:
     fail_on_skip: false
     modules:
       - tasks.cephfs.volumes.test_clone_stats.TestCloneProgressReporter
+      - tasks.cephfs.volumes.test_clone_stats.TestOngoingClonesCounter

qa/tasks/cephfs/volumes/test_clone_stats.py

Lines changed: 71 additions & 0 deletions
@@ -791,3 +791,74 @@ def test_when_clones_cancelled_are_more_than_cloner_threads(self):
                 pass
             else:
                 raise
+
+
+class TestOngoingClonesCounter(CloneProgressReporterHelper):
+    '''
+    Class CloneProgressReporter contains the code that lets it figure out the
+    number of ongoing clones on its own, without referring to the MGR config
+    option mgr/volumes/max_concurrent_clones. This class contains tests to
+    ensure that this code, which does the figuring out, works fine.
+    '''
+
+    def _run_test(self, MAX_THREADS, NUM_OF_CLONES):
+        v = self.volname
+        sv = 'sv1'
+        ss = 'ss1'
+        c = self._gen_subvol_clone_name(NUM_OF_CLONES)
+
+        self.config_set('mgr', 'mgr/volumes/snapshot_clone_no_wait', 'false')
+        self.config_set('mgr', 'mgr/volumes/max_concurrent_clones', MAX_THREADS)
+        self.run_ceph_cmd(f'fs subvolume create {v} {sv} --mode=777')
+
+        sv_path = self.get_ceph_cmd_stdout(f'fs subvolume getpath {v} {sv}')
+        sv_path = sv_path[1:]
+
+        size = self._do_subvolume_io(sv, None, None, 3, 1024)
+        self.run_ceph_cmd(f'fs subvolume snapshot create {v} {sv} {ss}')
+        self.wait_till_rbytes_is_right(v, sv, size)
+
+        for i in c:
+            self.run_ceph_cmd(f'fs subvolume snapshot clone {v} {sv} {ss} {i}')
+
+        msg = ('messages for progress bars for snapshot cloning are not how '
+               'they were expected')
+        with safe_while(tries=20, sleep=1, action=msg) as proceed:
+            while proceed():
+                pevs = self.get_pevs_from_ceph_status(c)
+
+                if len(pevs) <= 1:
+                    continue  # let's wait for second progress bar to appear
+                elif len(pevs) > 2:
+                    raise RuntimeError(
+                        'More than 2 progress bars were found in the output '
+                        'of "ceph status" command.\nprogress events -'
+                        f'\n{pevs}')
+
+                msg = ('"progress_events" dict in "ceph -s" output must have '
+                       f'only two entries.\n{pevs}')
+                self.assertEqual(len(pevs), 2, msg)
+                pev1, pev2 = pevs.values()
+                pev1_msg, pev2_msg = pev1['message'].lower(), pev2['message'].lower()
+                if 'ongoing clones' in pev1_msg and 'total ' in pev2_msg:
+                    if f'{MAX_THREADS} ongoing clones' in pev1_msg:
+                        break
+                elif 'ongoing clones' in pev2_msg and 'total ' in pev1_msg:
+                    if f'{MAX_THREADS} ongoing clones' in pev2_msg:
+                        break
+                else:
+                    raise RuntimeError(msg)
+
+        self.cancel_clones_and_ignore_if_finished(c)
+        for i in c:
+            self._wait_for_clone_to_be_canceled(i)
+        self._wait_for_clone_progress_bars_to_be_removed()
+
+    def test_for_2_ongoing_clones(self):
+        self._run_test(MAX_THREADS=2, NUM_OF_CLONES=5)
+
+    def test_for_4_ongoing_clones(self):
+        self._run_test(MAX_THREADS=4, NUM_OF_CLONES=8)
+
+    def test_for_6_ongoing_clones(self):
+        self._run_test(MAX_THREADS=6, NUM_OF_CLONES=16)
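
For context, the class docstring above says CloneProgressReporter derives the number of ongoing clones on its own rather than reading mgr/volumes/max_concurrent_clones. A minimal sketch of that idea (an assumption for illustration, not the actual mgr/volumes implementation) is to count clones whose reported state is "in-progress" and build the two messages the test looks for:

def count_ongoing_clones(clone_states):
    # clone_states: mapping of clone name -> state string as reported by
    # "ceph fs clone status" (e.g. "pending", "in-progress", "complete").
    return sum(1 for state in clone_states.values() if state == 'in-progress')

def progress_messages(clone_states):
    ongoing = count_ongoing_clones(clone_states)
    total = len(clone_states)
    # The test expects one progress bar mentioning "<N> ongoing clones" and
    # a second one mentioning the total number of clones; exact wording of
    # the real progress events is assumed here.
    return (f'{ongoing} ongoing clones', f'Total {total} clones')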
