Commit ecce751

qa: Edit test files to incorporate the unset arguments crush_rule, size,
and min_size

Fixes: https://tracker.ceph.com/issues/68842
Signed-off-by: Kamoltat Sirivadhna <[email protected]>
Parent: cc66889

3 files changed (+39 −4 lines)

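These test edits track a change to the "ceph osd pool stretch unset" command, which now takes the crush_rule, size, and min_size to restore in addition to the pool name, e.g. "ceph osd pool stretch unset pool_stretch replicated_rule 6 3" (the exact invocation exercised in the diffs below). Each test now finishes by unsetting the stretch pool back to the default replicated rule and waiting for all PGs to return to active+clean.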

qa/tasks/stretch_cluster.py

Lines changed: 23 additions & 1 deletion

@@ -58,6 +58,7 @@ class TestStretchCluster(MgrTestCase):
     PEERING_CRUSH_BUCKET_TARGET = 3
     PEERING_CRUSH_BUCKET_BARRIER = 'datacenter'
     CRUSH_RULE = 'replicated_rule_custom'
+    DEFAULT_CRUSH_RULE = 'replicated_rule'
     SIZE = 6
     MIN_SIZE = 3
     BUCKET_MAX = SIZE // PEERING_CRUSH_BUCKET_TARGET

@@ -594,6 +595,17 @@ def test_mon_failures_in_stretch_pool(self):
             success_hold_time=self.SUCCESS_HOLD_TIME
         )
 
+        # Unset the pool back to the default rule; expect PGs to be 100% active+clean
+        self.mgr_cluster.mon_manager.raw_cluster_cmd(
+            'osd', 'pool', 'stretch', 'unset',
+            self.POOL, self.DEFAULT_CRUSH_RULE,
+            str(self.SIZE), str(self.MIN_SIZE))
+        self.wait_until_true_and_hold(
+            lambda: self._pg_all_active_clean(),
+            timeout=self.RECOVERY_PERIOD,
+            success_hold_time=self.SUCCESS_HOLD_TIME
+        )
+
     def test_set_stretch_pool_no_active_pgs(self):
         """
         Test setting a pool to stretch cluster and checks whether

@@ -686,10 +698,20 @@ def test_set_stretch_pool_no_active_pgs(self):
             timeout=self.RECOVERY_PERIOD,
             success_hold_time=self.SUCCESS_HOLD_TIME)
 
-        # Bring back osds iin DC2 expects PGs to be 100% active+clean
+        # Bring back OSDs in DC2; expect PGs to be 100% active+clean
         self._bring_back_all_osds_in_dc('dc2')
         self.wait_until_true_and_hold(
             lambda: self._pg_all_active_clean(),
             timeout=self.RECOVERY_PERIOD,
             success_hold_time=self.SUCCESS_HOLD_TIME
         )
+        # Unset the pool back to the default rule; expect PGs to be 100% active+clean
+        self.mgr_cluster.mon_manager.raw_cluster_cmd(
+            'osd', 'pool', 'stretch', 'unset',
+            self.POOL, self.DEFAULT_CRUSH_RULE,
+            str(self.SIZE), str(self.MIN_SIZE))
+        self.wait_until_true_and_hold(
+            lambda: self._pg_all_active_clean(),
+            timeout=self.RECOVERY_PERIOD,
+            success_hold_time=self.SUCCESS_HOLD_TIME
+        )
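The unset-and-wait block above is duplicated verbatim twice in this file and once more in test_netsplit_3az_stretch_pool.py below. A minimal sketch of a shared helper that could fold the three copies together; the method name _unset_stretch_pool_and_wait is hypothetical, not part of this commit, and it assumes the same test-class attributes (POOL, DEFAULT_CRUSH_RULE, SIZE, MIN_SIZE, RECOVERY_PERIOD, SUCCESS_HOLD_TIME) shown in the diffs:

    def _unset_stretch_pool_and_wait(self):
        # Hypothetical helper, not part of this commit: revert the stretch
        # pool to the default replicated rule, then require every PG to be
        # active+clean and to hold that state for SUCCESS_HOLD_TIME.
        self.mgr_cluster.mon_manager.raw_cluster_cmd(
            'osd', 'pool', 'stretch', 'unset',
            self.POOL, self.DEFAULT_CRUSH_RULE,
            str(self.SIZE), str(self.MIN_SIZE))
        self.wait_until_true_and_hold(
            lambda: self._pg_all_active_clean(),
            timeout=self.RECOVERY_PERIOD,
            success_hold_time=self.SUCCESS_HOLD_TIME)

Each test body would then end with self._unset_stretch_pool_and_wait(), keeping the unset argument order in a single place if the command's signature changes again.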

qa/tasks/test_netsplit_3az_stretch_pool.py

Lines changed: 11 additions & 0 deletions

@@ -21,6 +21,7 @@ class TestNetSplit(CephTestCase):
     PEERING_CRUSH_BUCKET_BARRIER = 'datacenter'
     POOL = 'pool_stretch'
     CRUSH_RULE = 'replicated_rule_custom'
+    DEFAULT_CRUSH_RULE = 'replicated_rule'
     SIZE = 6
     MIN_SIZE = 3
     BUCKET_MAX = SIZE // PEERING_CRUSH_BUCKET_TARGET

@@ -278,4 +279,14 @@ def test_mon_netsplit(self):
             timeout=self.RECOVERY_PERIOD,
             success_hold_time=self.SUCCESS_HOLD_TIME
         )
+        # Unset the pool back to the default rule; expect PGs to be 100% active+clean
+        self.mgr_cluster.mon_manager.raw_cluster_cmd(
+            'osd', 'pool', 'stretch', 'unset',
+            self.POOL, self.DEFAULT_CRUSH_RULE,
+            str(self.SIZE), str(self.MIN_SIZE))
+        self.wait_until_true_and_hold(
+            lambda: self._pg_all_active_clean(),
+            timeout=self.RECOVERY_PERIOD,
+            success_hold_time=self.SUCCESS_HOLD_TIME
+        )
         log.info("test_mon_netsplit passed!")

qa/workunits/mon/mon-stretch-pool.sh

Lines changed: 5 additions & 3 deletions

@@ -97,10 +97,12 @@ expect_false ceph osd pool stretch set non_exist_pool 2 3 datacenter $TEST_CRUSH_RULE 6 3
 expect_false ceph osd pool stretch set $TEST_POOL_STRETCH 2 3 non_exist_barrier $TEST_CRUSH_RULE 6 3
 # A non-existent crush_rule should return an appropriate error
 expect_false ceph osd pool stretch set $TEST_POOL_STRETCH 2 3 datacenter $TEST_CRUSH_RULE 6 3
+# Unsetting a pool with missing arguments should return an error
+expect_false ceph osd pool stretch unset $TEST_POOL_STRETCH
 # Unsetting a non-existent pool should return an error
-expect_false ceph osd pool stretch unset non_exist_pool
+expect_false ceph osd pool stretch unset non_exist_pool replicated_rule 6 3
 # Unsetting a non-stretch pool should return an error
-expect_false ceph osd pool stretch unset $TEST_POOL_STRETCH
+expect_false ceph osd pool stretch unset $TEST_POOL_STRETCH replicated_rule 6 3
 
 # Create a custom crush rule
 ceph osd getcrushmap > crushmap

@@ -139,7 +141,7 @@ expect_true ceph osd pool stretch set $TEST_POOL_STRETCH 2 3 datacenter $TEST_CRUSH_RULE 6 3
 expect_true ceph osd pool stretch show $TEST_POOL_STRETCH
 
 # Unset the stretch pool and expect it to work
-expect_true ceph osd pool stretch unset $TEST_POOL_STRETCH
+expect_true ceph osd pool stretch unset $TEST_POOL_STRETCH replicated_rule 6 3
 # Try to show the stretch pool values again; this should return an error since
 # the pool is no longer a stretch pool.
 expect_false ceph osd pool stretch show $TEST_POOL_STRETCH
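Requiring crush_rule, size, and min_size on unset presumably lets the caller state exactly what the pool should revert to rather than leaving the monitor to pick defaults; the new expect_false case above pins down that the bare single-argument form is now rejected.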
