@@ -4627,6 +4627,140 @@ def test_on_resuming_partly_purged_subvol_purges_fully(self):
         self._wait_for_trash_empty()


+class TestPauseCloning(TestVolumesHelper):
+    '''
+    Tests for the config option "mgr/volumes/pause_cloning", which pauses
+    and resumes the async cloner threads.
+    '''
+
+    CLIENTS_REQUIRED = 1
+    MDSS_REQUIRED = 1
+
+    CONF_OPT = 'mgr/volumes/pause_cloning'
+
+    def setUp(self):
+        super().setUp()
+
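+        # these tests assume a fixed number of cloner threads; disabling
+        # snapshot_clone_no_wait lets clone requests be accepted and queued
+        # even when no cloner thread is free to pick them up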
+        self.NUM_OF_CLONER_THREADS = 4
+        self.config_set('mgr', 'mgr/volumes/max_concurrent_clones', self.NUM_OF_CLONER_THREADS)
+        self.config_set('mgr', 'mgr/volumes/snapshot_clone_no_wait', 'false')
+
+    def tearDown(self):
+        # every test changes the value of this config option as per its need.
+        # ensure that the config option's default value is restored during
+        # tearDown() so that there's zero chance of it interfering with the
+        # next test.
+        self.config_set('mgr', self.CONF_OPT, False)
+
+        # ensure purge threads have no jobs left from the previous test so
+        # that the next test doesn't face unnecessary complications.
+        self._wait_for_trash_empty()
+
+        super().tearDown()
+
+    def test_pausing_prevents_new_clones_from_starting(self):
+        v = self.volname
+        sv = 'sv1'
+        ss = 'ss1'
+        c = 'ss1c1'
+
+        self.run_ceph_cmd(f'fs subvolume create {v} {sv} --mode=777')
+        self._do_subvolume_io(sv, None, None, 1, 10)
+        sv_path = self.get_ceph_cmd_stdout(f'fs subvolume getpath {v} '
+                                           f'{sv}')[1:].strip()
+
+        self.run_ceph_cmd(f'fs subvolume snapshot create {v} {sv} {ss}')
+        self.run_ceph_cmd(f'config set mgr {self.CONF_OPT} true')
+        self.run_ceph_cmd(f'fs subvolume snapshot clone {v} {sv} {ss} {c}')
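+        # wait long enough for the cloner threads to have picked up the job
+        # had cloning not been paused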
+        time.sleep(10)
+
+        # locate the clone's data directory; it should still be empty since
+        # cloning is paused
+        path = os.path.dirname(os.path.dirname(sv_path))
+        uuid = self.mount_a.get_shell_stdout(f'ls {path}/{c}').strip()
+        # n = num of files, value returned by "wc -l"
+        n = self.mount_a.get_shell_stdout(f'ls {path}/{c}/{uuid} | wc -l')
+        # num of files should be 0, cloning should not have begun
+        self.assertEqual(int(n), 0)
+
+    def test_pausing_halts_ongoing_cloning(self):
+        v = self.volname
+        sv = 'sv1'
+        ss = 'ss1'
+        c = 'ss1c1'
+
+        NUM_OF_FILES = 3
+        self.run_ceph_cmd(f'fs subvolume create {v} {sv} --mode=777')
+        self._do_subvolume_io(sv, None, None, NUM_OF_FILES, 1024)
+        sv_path = self.get_ceph_cmd_stdout(f'fs subvolume getpath {v} '
+                                           f'{sv}')[1:].strip()
+
+        self.run_ceph_cmd(f'fs subvolume snapshot create {v} {sv} {ss}')
+        self.run_ceph_cmd(f'fs subvolume snapshot clone {v} {sv} {ss} {c}')
+        # let some cloning begin...
+        time.sleep(2)
+        # ...and now pause cloning
+        self.run_ceph_cmd(f'config set mgr {self.CONF_OPT} true')
+
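+        # derive the clone's data directory, <group dir>/<clone name>/<uuid>,
+        # from the source subvolume's path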
+        path = os.path.dirname(os.path.dirname(sv_path))
+        uuid = self.mount_a.get_shell_stdout(f'ls {path}/{c}').strip()
+        # n = num of files, value returned by "wc -l"
+        n = self.mount_a.get_shell_stdout(f'ls {path}/{c}/{uuid} | wc -l')
+        # num of files should be less than or equal to the number of cloner
+        # threads
+        self.assertLessEqual(int(n), self.NUM_OF_CLONER_THREADS)
+
+    def test_resuming_begins_pending_cloning(self):
+        v = self.volname
+        sv = 'sv1'
+        ss = 'ss1'
+        c = 'ss1c1'
+
+        NUM_OF_FILES = 3
+        self.run_ceph_cmd(f'fs subvolume create {v} {sv} --mode=777')
+        self._do_subvolume_io(sv, None, None, NUM_OF_FILES, 1024)
+        sv_path = self.get_ceph_cmd_stdout(f'fs subvolume getpath {v} '
+                                           f'{sv}')[1:].strip()
+
+        self.run_ceph_cmd(f'fs subvolume snapshot create {v} {sv} {ss}')
+        self.run_ceph_cmd(f'config set mgr {self.CONF_OPT} true')
+        self.run_ceph_cmd(f'fs subvolume snapshot clone {v} {sv} {ss} {c}')
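+        # give the cloner threads a brief window in which cloning could have
+        # (wrongly) started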
+        time.sleep(2)
+
+        path = os.path.dirname(os.path.dirname(sv_path))
+        uuid = self.mount_a.get_shell_stdout(f'ls {path}/{c}').strip()
+        # n = num of files, value returned by "wc -l"
+        n = self.mount_a.get_shell_stdout(f'ls {path}/{c}/{uuid} | wc -l')
+        # num of files should be 0, cloning should not have begun
+        self.assertEqual(int(n), 0)
+
+        self.run_ceph_cmd(f'config set mgr {self.CONF_OPT} false')
+        # test that cloning began and reached completion
+        with safe_while(tries=3, sleep=10) as proceed:
+            while proceed():
+                n = self.mount_a.get_shell_stdout(
+                    f'ls {path}/{c}/{uuid} | wc -l')
+                if int(n) == NUM_OF_FILES:
+                    break
+
+    def test_resuming_causes_partly_cloned_subvol_to_clone_fully(self):
+        v = self.volname
+        sv = 'sv1'
+        ss = 'ss1'
+        c = 'ss1c1'
+
+        NUM_OF_FILES = 3
+        self.run_ceph_cmd(f'fs subvolume create {v} {sv} --mode=777')
+        self._do_subvolume_io(sv, None, None, NUM_OF_FILES, 1024)
+        sv_path = self.get_ceph_cmd_stdout(f'fs subvolume getpath {v} '
+                                           f'{sv}')[1:].strip()
+
+        self.run_ceph_cmd(f'fs subvolume snapshot create {v} {sv} {ss}')
+        self.run_ceph_cmd(f'fs subvolume snapshot clone {v} {sv} {ss} {c}')
+        time.sleep(2)
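+        # cloning has had a couple of seconds to make progress; pause it so
+        # that the clone is left only partly populated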
+        self.run_ceph_cmd(f'config set mgr {self.CONF_OPT} true')
+        time.sleep(2)
+
+        self.run_ceph_cmd(f'config set mgr {self.CONF_OPT} false')
+        # test that cloning was resumed and reached completion
+        path = os.path.dirname(os.path.dirname(sv_path))
+        uuid = self.mount_a.get_shell_stdout(f'ls {path}/{c}').strip()
+        with safe_while(tries=3, sleep=10) as proceed:
+            while proceed():
+                n = self.mount_a.get_shell_stdout(
+                    f'ls {path}/{c}/{uuid} | wc -l')
+                if int(n) == NUM_OF_FILES:
+                    break
+
+
 class TestSubvolumeGroupSnapshots(TestVolumesHelper):
     """Tests for FS subvolume group snapshot operations."""
     @unittest.skip("skipping subvolumegroup snapshot tests")