@@ -7876,7 +7876,22 @@ def tearDown(self):
             self.run_ceph_cmd('fs subvolume snapshot rm --force '
                               f'--format json {v} {sv} {ss}')
 
-            self.run_ceph_cmd(f'fs subvolume rm {v} {sv}')
+            try:
+                self.run_ceph_cmd(f'fs subvolume rm {v} {sv}')
+            except CommandFailedError as e:
+                if e.exitstatus == errno.ENOENT:
+                    log.info(
+                        'ignoring this error; perhaps the subvolume was '
+                        'deleted during the test and the snapshot deleted '
+                        'above was a retained snapshot (a snapshot retained '
+                        'despite subvolume deletion). when a retained '
+                        'snapshot is deleted, the subvolume directory is '
+                        'deleted along with it. before that, the subvolume '
+                        'is still reported by the "subvolume ls" command, '
+                        'which is what probably caused the confusion here')
+                    pass
+                else:
+                    raise
 
         # verify trash dir is clean
         self._wait_for_trash_empty()
@@ -8090,6 +8105,58 @@ def test_clone_to_diff_group_and_less_than_cloner_threads(self):
         # and not cancelling these clones doesn't affect this test case.
         self.cancel_clones_and_ignore_if_finished(c)
 
+    def test_clone_after_subvol_is_removed(self):
+        '''
+        Initiate a clone after the source subvolume has been deleted (with
+        its snapshot retained) and then test that, while this clone is in
+        progress, one progress bar showing the progress of this clone is
+        printed in the output of the "ceph status" command.
+        '''
+        v = self.volname
+        sv = 'sv1'
+        ss = 'ss1'
+        # XXX: "clone" must be part of the clone name for sake of tearDown()
+        c = 'ss1clone1'
+
+        # XXX: without setting mds_snap_rstat to true, rstats are not updated
+        # on a subvolume snapshot and therefore the clone progress bar will
+        # not show any progress.
+        self.config_set('mds', 'mds_snap_rstat', 'true')
+
+        self.run_ceph_cmd(f'fs subvolume create {v} {sv} --mode=777')
+        size = self._do_subvolume_io(sv, None, None, 10, 1024)
+
+        self.run_ceph_cmd(f'fs subvolume snapshot create {v} {sv} {ss}')
+        self.wait_till_rbytes_is_right(v, sv, size)
+
+        self.run_ceph_cmd(f'fs subvolume rm {v} {sv} --retain-snapshots')
+        self.run_ceph_cmd(f'fs subvolume snapshot clone {v} {sv} {ss} {c}')
+
+        with safe_while(tries=15, sleep=10) as proceed:
+            while proceed():
+                pev = self.get_pevs_from_ceph_status(c)
+
+                if len(pev) < 1:
+                    continue
+                elif len(pev) > 1:
+                    raise RuntimeError('For 1 clone, "ceph status" output '
+                                       'has more than 1 progress bar; it '
+                                       f'should have only 1.\npev -\n{pev}')
+
+                # ensure that exactly 1 progress bar for cloning is present
+                # in "ceph status" output
+                msg = ('"progress_events" dict in "ceph status" output must '
+                       f'have exactly one entry.\nprogress_event dict -\n{pev}')
+                self.assertEqual(len(pev), 1, msg)
+
+                pev_msg = tuple(pev.values())[0]['message']
+                self.assertIn('1 ongoing clones', pev_msg)
+                break
+
+        # allowing the clone job to finish would consume too much time and
+        # space, and not cancelling this clone doesn't affect this test case.
+        self.cancel_clones_and_ignore_if_finished(c)
+
     def test_clones_equal_to_cloner_threads(self):
         '''
         Test that one progress bar is printed in the output of "ceph status"
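The new test polls get_pevs_from_ceph_status(), a helper defined elsewhere in this file. As a rough, hedged sketch of what such a helper could look like (the real implementation may differ; a get_ceph_cmd_stdout() style helper and the json module are assumed to be available to the test class):

    def _sketch_get_pevs_from_ceph_status(self, clones):
        # "ceph status --format json" includes a "progress_events" dict; each
        # entry carries a human-readable "message" and a "progress" fraction.
        status = json.loads(self.get_ceph_cmd_stdout('status --format json'))
        pevs = status.get('progress_events', {})

        # keep only the progress bars that describe ongoing clones; their
        # messages look like "1 ongoing clones" (see the assertion in the
        # test above). the "clones" argument mirrors the call sites but is
        # not needed for filtering in this sketch.
        return {ev_id: ev for ev_id, ev in pevs.items()
                if 'clone' in ev.get('message', '')}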