
Commit 6a36abc

fix: tests fixes #74 (#557)
* model calls for consume/offer breaking in tests
* use scaling strategy to reproduce crash of primary previous to secondaries join
1 parent db6ee23 commit 6a36abc

File tree: 2 files changed, +22 -51 lines


tests/integration/high_availability/test_async_replication.py

Lines changed: 7 additions & 3 deletions
@@ -124,13 +124,17 @@ async def test_build_and_deploy(
 @markers.juju3
 @markers.amd64_only  # TODO: remove after mysql-router-k8s arm64 stable release
 @pytest.mark.abort_on_fail
-async def test_async_relate(first_model: Model, second_model: Model) -> None:
+async def test_async_relate(ops_test: OpsTest, first_model: Model, second_model: Model) -> None:
     """Relate the two mysql clusters."""
     logger.info("Creating offers in first model")
-    await first_model.create_offer(f"{MYSQL_APP1}:replication-offer")
+    offer_command = f"offer {MYSQL_APP1}:replication-offer"
+    await ops_test.juju(*offer_command.split())
 
     logger.info("Consume offer in second model")
-    await second_model.consume(endpoint=f"admin/{first_model.info.name}.{MYSQL_APP1}")
+    consume_command = (
+        f"consume -m {second_model.info.name} admin/{first_model.info.name}.{MYSQL_APP1}"
+    )
+    await ops_test.juju(*consume_command.split())
 
     logger.info("Relating the two mysql clusters")
     await second_model.integrate(f"{MYSQL_APP1}", f"{MYSQL_APP2}:replication")
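
Note on the new calls: ops_test.juju(...) comes from pytest-operator and shells out to the juju CLI, which sidesteps the libjuju create_offer/consume model calls that were breaking in tests. A minimal, hypothetical usage sketch (the return-code check is illustrative and not part of this commit; it assumes ops_test.juju returns a (return_code, stdout, stderr) tuple):

    # Hypothetical check around the CLI-based offer call (not in this commit).
    return_code, stdout, stderr = await ops_test.juju("offer", f"{MYSQL_APP1}:replication-offer")
    assert return_code == 0, f"juju offer failed: {stderr or stdout}"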

tests/integration/high_availability/test_crash_during_setup.py

Lines changed: 15 additions & 48 deletions
@@ -8,8 +8,7 @@
 import pytest
 import yaml
 
-from ..helpers import delete_file_or_directory_in_unit, write_content_to_file_in_unit
-from .high_availability_helpers import CLUSTER_NAME, delete_pod
+from .high_availability_helpers import CLUSTER_NAME, delete_pod, scale_application
 
 logger = logging.getLogger(__name__)
 
@@ -21,76 +20,44 @@
 @pytest.mark.group(1)
 @pytest.mark.abort_on_fail
 async def test_crash_during_cluster_setup(ops_test) -> None:
+    """Test primary crash during startup.
+
+    It must recover/end setup when the primary got offline.
+    """
     mysql_charm = await ops_test.build_charm(".")
 
     config = {"cluster-name": CLUSTER_NAME, "profile": "testing"}
     resources = {"mysql-image": METADATA["resources"]["mysql-image"]["upstream-source"]}
 
-    logger.info("Deploying 3 units of mysql-k8s")
+    logger.info("Deploying 1 units of mysql-k8s")
     mysql_application = await ops_test.model.deploy(
         mysql_charm,
         application_name=APP_NAME,
         config=config,
         resources=resources,
-        num_units=3,
+        num_units=1,
         trust=True,
     )
 
-    logger.info("Waiting until application enters maintenance status")
-    await ops_test.model.block_until(
-        lambda: mysql_application.status == "maintenance", timeout=TIMEOUT
-    )
+    logger.info("Waiting for single unit to be ready")
+    await ops_test.model.block_until(lambda: mysql_application.status == "active", timeout=TIMEOUT)
 
-    leader_unit = None
-    non_leader_units = []
+    # leader unit is the 1st unit
+    leader_unit = mysql_application.units[0]
 
-    for unit in mysql_application.units:
-        if not await unit.is_leader_from_status():
-            non_leader_units.append(unit)
-        else:
-            leader_unit = unit
+    logger.info("Scale to 3 units")
+    await scale_application(ops_test, APP_NAME, 3, False)
 
-    logger.info("Waiting until leader unit is creating cluster")
+    logger.info("Waiting until application enters waiting status")
     await ops_test.model.block_until(
-        lambda: leader_unit.workload_status == "maintenance"
-        and leader_unit.agent_status == "executing"
-        and "Creating cluster" in leader_unit.workload_status_message,
-        timeout=TIMEOUT,
+        lambda: mysql_application.status == "waiting", timeout=TIMEOUT
     )
 
-    logger.info("Disabling non-leader units to avoid joining the cluster")
-    for unit in non_leader_units:
-        unit_label = unit.name.replace("/", "-")
-        await write_content_to_file_in_unit(
-            ops_test,
-            unit,
-            f"/var/lib/juju/agents/unit-{unit_label}/charm/disable",
-            "",
-            container_name="charm",
-        )
-
     logger.info("Deleting pod")
     delete_pod(ops_test, leader_unit)
 
-    logger.info("Waiting until pod rescheduled and cluster is set up again")
     async with ops_test.fast_forward("60s"):
-        await ops_test.model.block_until(
-            lambda: leader_unit.workload_status == "active"
-            and leader_unit.workload_status_message == "Primary",
-            timeout=TIMEOUT,
-        )
-
-        logger.info("Removing disabled flag from non-leader units")
-        for unit in non_leader_units:
-            unit_label = unit.name.replace("/", "-")
-            await delete_file_or_directory_in_unit(
-                ops_test,
-                unit.name,
-                f"/var/lib/juju/agents/unit-{unit_label}/charm/disable",
-                container_name="charm",
-            )
-
         logger.info("Waiting until cluster is fully active")
         await ops_test.model.wait_for_idle(
             apps=[APP_NAME],
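
The scale_application helper imported above is defined in high_availability_helpers and is not shown in this diff. A minimal sketch of what such a helper could look like, assuming python-libjuju's Application.scale() for Kubernetes models; the actual implementation may differ:

    # Hypothetical sketch; the real helper lives in
    # tests/integration/high_availability/high_availability_helpers.py.
    async def scale_application(ops_test, application_name: str, scale: int, wait: bool = True) -> None:
        """Scale a Kubernetes charm application to the given unit count."""
        application = ops_test.model.applications[application_name]
        await application.scale(scale)  # libjuju sets the desired unit count on k8s models
        if wait:
            # Optionally block until all units settle into an active state.
            await ops_test.model.wait_for_idle(
                apps=[application_name], status="active", timeout=15 * 60
            )

In the test above the helper is called with its final argument set to False, which in this sketch maps to wait=False, so the leader pod can be deleted while the new secondaries are still joining the cluster.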
