Commit 18d5cae

qa: remove redundant and broken test
Scrub does not fix damaged dirfrags for any of the damage types we currently mark dirfrags damaged for (corrupt fnode / missing dirfrag object). In any case, this scenario is covered in the cephfs_data_scan tests, with correct checks for damage handling.

Fixes: 7f0cf0b
Signed-off-by: Patrick Donnelly <[email protected]>
1 parent 21d0992 commit 18d5cae
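
The coverage the commit message points to exercises dirfrag damage through cephfs-data-scan rather than through scrub repair. As a rough illustration only, here is a minimal sketch of that flow, built from the same harness helper calls that appear in the removed test below; the import path, class name, and test name are assumptions and are not part of this commit or of the cephfs_data_scan suite.

# A minimal sketch, not part of this commit: the import path, class name and
# test name below are assumptions; the self.fs / self.mount_a helper calls are
# the ones used by the removed test.
from tasks.cephfs.cephfs_test_case import CephFSTestCase


class TestDirfragRecoverySketch(CephFSTestCase):
    def test_missing_dirfrag_recovered_by_data_scan(self):
        # Create a directory whose dirfrag object we will delete.
        self.mount_a.run_shell(["mkdir", "dir"])
        self.mount_a.run_shell(["touch", "dir/file"])
        dir_ino = self.mount_a.path_to_ino("dir")

        # Flush everything to the backing store, then take the MDS down.
        self.mount_a.umount_wait()
        self.fs.mds_asok(["flush", "journal"])
        self.fs.fail()

        # Inflict the damage that scrub does not repair: remove the dirfrag
        # object from the metadata pool.
        self.fs.radosm(["rm", "{0:x}.00000000".format(dir_ino)])

        # Recover with cephfs-data-scan, the path the commit message refers to.
        self.fs.journal_tool(['journal', 'reset', '--yes-i-really-really-mean-it'], 0)
        self.fs.data_scan(["scan_extents", self.fs.get_data_pool_name()])
        self.fs.data_scan(["scan_inodes", self.fs.get_data_pool_name()])
        self.fs.data_scan(["scan_links"])

        # Bring the filesystem back and confirm a repair scrub completes.
        self.fs.set_joinable()
        self.fs.wait_for_daemons()
        self.mount_a.mount_wait()
        out_json = self.fs.run_scrub(["start", "/dir", "recursive,repair"])
        self.assertEqual(
            self.fs.wait_until_scrub_complete(tag=out_json["scrub_tag"]), True)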

File tree

1 file changed (+0, -81 lines)

qa/tasks/cephfs/test_forward_scrub.py

Lines changed: 0 additions & 81 deletions
@@ -390,87 +390,6 @@ def test_health_status_after_dentry_repair(self):
         # Clean up the omap object
         self.fs.radosm(["setomapval", dirfrag_obj, "file_to_be_damaged_head", junk])

-    def test_health_status_after_dirfrag_repair(self):
-        """
-        Test that the damage health status is cleared
-        after the damaged dirfrag is repaired
-        """
-        self.mount_a.run_shell(["mkdir", "dir"])
-        self.mount_a.run_shell(["touch", "dir/file"])
-        self.mount_a.run_shell(["mkdir", "testdir"])
-        self.mount_a.run_shell(["ln", "dir/file", "testdir/hardlink"])
-
-        dir_ino = self.mount_a.path_to_ino("dir")
-
-        # Ensure everything is written to backing store
-        self.mount_a.umount_wait()
-        self.fs.mds_asok(["flush", "journal"])
-
-        # Drop everything from the MDS cache
-        self.fs.fail()
-
-        self.fs.radosm(["rm", "{0:x}.00000000".format(dir_ino)])
-
-        self.fs.journal_tool(['journal', 'reset', '--yes-i-really-really-mean-it'], 0)
-        self.fs.set_joinable()
-        self.fs.wait_for_daemons()
-        self.mount_a.mount_wait()
-
-        # Check that touching the hardlink gives EIO
-        ran = self.mount_a.run_shell(["stat", "testdir/hardlink"], wait=False)
-        try:
-            ran.wait()
-        except CommandFailedError:
-            self.assertTrue("Input/output error" in ran.stderr.getvalue())
-
-        out_json = self.fs.run_scrub(["start", "/dir", "recursive"])
-        self.assertEqual(self.fs.wait_until_scrub_complete(tag=out_json["scrub_tag"]), True)
-
-        # Check that an entry is created in the damage table
-        damage = json.loads(
-            self.fs.mon_manager.raw_cluster_cmd(
-                'tell', 'mds.{0}'.format(self.fs.get_active_names()[0]),
-                "damage", "ls", '--format=json-pretty'))
-        self.assertEqual(len(damage), 3)
-        damage_types = set()
-        for i in range(0, 3):
-            damage_types.add(damage[i]['damage_type'])
-        self.assertIn("dir_frag", damage_types)
-        self.wait_until_true(lambda: self._is_MDS_damage(), timeout=100)
-
-        out_json = self.fs.run_scrub(["start", "/dir", "recursive,repair"])
-        self.assertEqual(self.fs.wait_until_scrub_complete(tag=out_json["scrub_tag"]), True)
-
-        # Check that the entry is cleared from the damage table
-        damage = json.loads(
-            self.fs.mon_manager.raw_cluster_cmd(
-                'tell', 'mds.{0}'.format(self.fs.get_active_names()[0]),
-                "damage", "ls", '--format=json-pretty'))
-        self.assertEqual(len(damage), 1)
-        self.assertNotEqual(damage[0]['damage_type'], "dir_frag")
-
-        self.mount_a.umount_wait()
-        self.fs.mds_asok(["flush", "journal"])
-        self.fs.fail()
-
-        # Run cephfs-data-scan
-        self.fs.data_scan(["scan_extents", self.fs.get_data_pool_name()])
-        self.fs.data_scan(["scan_inodes", self.fs.get_data_pool_name()])
-        self.fs.data_scan(["scan_links"])
-
-        self.fs.set_joinable()
-        self.fs.wait_for_daemons()
-        self.mount_a.mount_wait()
-
-        out_json = self.fs.run_scrub(["start", "/dir", "recursive,repair"])
-        self.assertEqual(self.fs.wait_until_scrub_complete(tag=out_json["scrub_tag"]), True)
-        damage = json.loads(
-            self.fs.mon_manager.raw_cluster_cmd(
-                'tell', 'mds.{0}'.format(self.fs.get_active_names()[0]),
-                "damage", "ls", '--format=json-pretty'))
-        self.assertEqual(len(damage), 0)
-        self.wait_until_true(lambda: not self._is_MDS_damage(), timeout=100)
-
     def test_health_status_after_backtrace_repair(self):
         """
         Test that the damage health status is cleared
