@@ -100,3 +100,29 @@ def test_backtrace(self):
         # we don't update the layout in all the old pools whenever it changes
         old_pool_layout = self.fs.read_layout(file_ino, pool=old_data_pool_name)
         self.assertEqual(old_pool_layout['object_size'], 4194304)
+
+    def test_backtrace_flush_on_deleted_data_pool(self):
+        """
+        That the MDS does not go read-only when handling backtrace update
+        errors while backtrace updates are batched and flushed to RADOS
+        (during journal trim) and some of the pools have been removed.
+        """
+        data_pool = self.fs.get_data_pool_name()
+        extra_data_pool_name_1 = data_pool + '_extra1'
+        self.fs.add_data_pool(extra_data_pool_name_1)
+
+        self.mount_a.run_shell(["mkdir", "dir_x"])
+        self.mount_a.setfattr("dir_x", "ceph.dir.layout.pool", extra_data_pool_name_1)
+        self.mount_a.run_shell(["touch", "dir_x/file_x"])
+        self.fs.flush()
+
+        extra_data_pool_name_2 = data_pool + '_extra2'
+        self.fs.add_data_pool(extra_data_pool_name_2)
+        self.mount_a.setfattr("dir_x/file_x", "ceph.file.layout.pool", extra_data_pool_name_2)
+        self.mount_a.run_shell(["setfattr", "-x", "ceph.dir.layout", "dir_x"])
+        self.run_ceph_cmd("fs", "rm_data_pool", self.fs.name, extra_data_pool_name_1)
+        self.fs.flush()
+
+        # quick check that the MDS has handled the backtrace update failure
+        # on the deleted data pool without going read-only: this mkdir would
+        # fail if the MDS had entered read-only mode.
+        self.mount_a.run_shell(["mkdir", "dir_y"])
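
For context, the read-only check could also be made explicit by asserting on cluster health after the final mkdir. The lines below are only a sketch, not part of this change: get_ceph_cmd_stdout() and the MDS_READ_ONLY health code are assumptions about the qa framework's helpers.

        # Hypothetical follow-up assertion (not in this PR): verify that no
        # MDS_READ_ONLY health warning was raised, i.e. the MDS stayed
        # writable after the batched backtrace flush hit the deleted pool.
        health = self.get_ceph_cmd_stdout("health", "detail")
        self.assertNotIn("MDS_READ_ONLY", health)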