
Commit e2fa39f

Fix unit tests, add ops-scenario.

1 parent d32789f

File tree: 4 files changed, +66 -105 lines

poetry.lock

Lines changed: 11 additions & 12 deletions
(Generated lockfile; diff not rendered by default.)

pyproject.toml

Lines changed: 1 addition & 0 deletions

@@ -40,6 +40,7 @@ coverage = {extras = ["toml"], version = "^7.8.0"}
 pytest = "^8.4.1"
 pytest-mock = "^3.14.1"
 ops = {version = "^2.22.0", extras = ["testing"]}
+ops-scenario = "^7.23.0"
 
 [tool.poetry.group.integration]
 optional = true
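
The new ops-scenario dependency provides the state-transition testing API (testing.Context, testing.State, ctx.run) used throughout the rewritten tests below. A minimal sketch of the pattern, reusing CassandraCharm and PEER_RELATION from this repository; the import path for CassandraCharm, the choice of the install event, and the loose status assertion are illustrative assumptions rather than behaviour taken from the charm:

# Sketch only: the event chosen (install) and the broad status check are
# assumptions; the real tests in this commit assert specific statuses and
# peer-relation data after start/update_status.
import ops
from ops import testing

from charm import CassandraCharm  # assumed import path
from core.state import PEER_RELATION


def test_install_reports_a_status():
    ctx = testing.Context(CassandraCharm)
    relation = testing.PeerRelation(id=1, endpoint=PEER_RELATION)
    state_in = testing.State(leader=True, relations={relation})

    # ctx.run() replays a single Juju event against the declared input state
    # and returns the resulting output state for assertions.
    state_out = ctx.run(ctx.on.install(), state_in)
    assert isinstance(state_out.unit_status, ops.StatusBase)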

tests/unit/test_charm.py

Lines changed: 52 additions & 91 deletions

@@ -12,103 +12,111 @@
 from core.state import PEER_RELATION
 
 
-def test_start_maintenance_status_when_starting():
+def test_start_maintenance_status():
     """Charm enters MaintenanceStatus when Cassandra is not yet healthy."""
     ctx = testing.Context(CassandraCharm)
     relation = testing.PeerRelation(id=1, endpoint=PEER_RELATION)
-    state_in = testing.State(leader=True, relations={relation})
+    state = testing.State(leader=True, relations={relation})
 
     with (
         patch("managers.config.ConfigManager.render_env"),
-        patch("workload.CassandraWorkload.restart"),
-        patch("workload.CassandraWorkload.is_alive"),
+        patch("managers.config.ConfigManager.render_cassandra_config"),
+        patch("charm.CassandraWorkload"),
         patch(
             "managers.cluster.ClusterManager.is_healthy",
             new_callable=PropertyMock(return_value=False),
         ),
     ):
-        state_out = ctx.run(ctx.on.start(), state_in)
-        assert state_out.unit_status == ops.MaintenanceStatus("waiting for Cassandra to start")
-        assert state_out.get_relation(1).local_unit_data.get("workload_state") == "active"
-        assert state_out.get_relation(1).local_app_data.get("cluster_state") == "active"
+        state = ctx.run(ctx.on.start(), state)
+        assert state.unit_status == ops.MaintenanceStatus("waiting for Cassandra to start")
+        assert state.get_relation(1).local_unit_data.get("workload_state") == "starting"
+        assert not state.get_relation(1).local_app_data.get("cluster_state")
 
+        state = ctx.run(ctx.on.update_status(), state)
+        assert state.unit_status == ops.MaintenanceStatus("waiting for Cassandra to start")
+        assert state.get_relation(1).local_unit_data.get("workload_state") == "starting"
+        assert not state.get_relation(1).local_app_data.get("cluster_state")
 
-def test_start_sets_active_status_when_healthy():
+
+def test_start_active_status_when_healthy():
     """Charm enters ActiveStatus when Cassandra is healthy after start."""
     ctx = testing.Context(CassandraCharm)
     relation = testing.PeerRelation(id=1, endpoint=PEER_RELATION)
-    state_in = testing.State(leader=True, relations={relation})
+    state = testing.State(leader=True, relations={relation})
 
     with (
         patch("managers.config.ConfigManager.render_env"),
-        patch("workload.CassandraWorkload.restart"),
-        patch("workload.CassandraWorkload.is_alive"),
+        patch("managers.config.ConfigManager.render_cassandra_config"),
+        patch("charm.CassandraWorkload"),
         patch(
             "managers.cluster.ClusterManager.is_healthy",
             new_callable=PropertyMock(return_value=True),
         ),
     ):
-        state_out = ctx.run(ctx.on.start(), state_in)
-        assert state_out.unit_status == ops.ActiveStatus()
-        assert state_out.get_relation(1).local_unit_data.get("workload_state") == "active"
-        assert state_out.get_relation(1).local_app_data.get("cluster_state") == "active"
+        state = ctx.run(ctx.on.start(), state)
+        assert state.unit_status == ops.MaintenanceStatus("waiting for Cassandra to start")
+        assert state.get_relation(1).local_unit_data.get("workload_state") == "starting"
+        assert not state.get_relation(1).local_app_data.get("cluster_state")
+
+        state = ctx.run(ctx.on.update_status(), state)
+        assert state.unit_status == ops.ActiveStatus()
+        assert state.get_relation(1).local_unit_data.get("workload_state") == "active"
+        assert state.get_relation(1).local_app_data.get("cluster_state") == "active"
 
 
 def test_start_only_after_leader_active():
     """Ensure Cassandra node starts only after leader is active and cluster_state is 'active'."""
     ctx = testing.Context(CassandraCharm)
     relation = testing.PeerRelation(id=1, endpoint=PEER_RELATION)
-    state_in = testing.State(leader=False, relations={relation})
+    state = testing.State(leader=False, relations={relation})
 
     with (
         patch("managers.config.ConfigManager.render_env"),
-        patch("workload.CassandraWorkload.restart") as restart,
-        patch("workload.CassandraWorkload.is_alive"),
+        patch("managers.config.ConfigManager.render_cassandra_config"),
+        patch("charm.CassandraWorkload") as workload,
        patch(
             "managers.cluster.ClusterManager.is_healthy",
             new_callable=PropertyMock(return_value=False),
         ),
     ):
-        state_out = ctx.run(ctx.on.start(), state_in)
-        assert state_out.unit_status == ops.MaintenanceStatus("installing Cassandra")
-        restart.assert_not_called()
+        state = ctx.run(ctx.on.start(), state)
+        assert state.unit_status == ops.WaitingStatus("waiting for cluster to start")
+        assert state.get_relation(1).local_unit_data.get("workload_state") == "waiting_for_start"
+        workload.return_value.start.assert_not_called()
 
     relation = testing.PeerRelation(
         id=1, endpoint=PEER_RELATION, local_app_data={"cluster_state": "active"}
     )
-    state_in = testing.State(leader=False, relations={relation})
+    state = testing.State(leader=False, relations={relation})
 
     with (
         patch("managers.config.ConfigManager.render_env"),
-        patch("workload.CassandraWorkload.restart") as restart,
-        patch("workload.CassandraWorkload.is_alive"),
+        patch("managers.config.ConfigManager.render_cassandra_config"),
+        patch("charm.CassandraWorkload") as workload,
         patch(
             "managers.cluster.ClusterManager.is_healthy",
             new_callable=PropertyMock(return_value=False),
         ),
     ):
-        state_out = ctx.run(ctx.on.start(), state_in)
-        assert state_out.unit_status == ops.MaintenanceStatus("waiting for Cassandra to start")
-        assert state_out.get_relation(1).local_unit_data.get("workload_state") == "active"
-        restart.assert_called()
+        state = ctx.run(ctx.on.start(), state)
+        assert state.unit_status == ops.MaintenanceStatus("waiting for Cassandra to start")
+        assert state.get_relation(1).local_unit_data.get("workload_state") == "starting"
+        workload.return_value.start.assert_called()
 
 
 def test_config_changed_invalid_config():
     """Ensure charm enters BlockedStatus if config is invalid during config_changed event."""
     ctx = testing.Context(CassandraCharm)
     relation = testing.PeerRelation(id=1, endpoint=PEER_RELATION)
-    state_in = testing.State(leader=True, relations={relation}, config={"profile": "invalid"})
+    state = testing.State(leader=True, relations={relation}, config={"profile": "invalid"})
+
     with (
         patch("managers.config.ConfigManager.render_env"),
-        patch("workload.CassandraWorkload.restart"),
-        patch("workload.CassandraWorkload.is_alive"),
-        patch(
-            "managers.cluster.ClusterManager.is_healthy",
-            new_callable=PropertyMock(return_value=False),
-        ),
+        patch("managers.config.ConfigManager.render_cassandra_config"),
+        patch("charm.CassandraWorkload"),
    ):
-        state_out = ctx.run(ctx.on.config_changed(), state_in)
-        assert state_out.unit_status == ops.BlockedStatus("invalid config")
+        state = ctx.run(ctx.on.config_changed(), state)
+        assert state.unit_status == ops.BlockedStatus("invalid config")
 
 
 def test_config_changed_no_restart():
@@ -117,59 +125,12 @@ def test_config_changed_no_restart():
     relation = testing.PeerRelation(
         id=1, endpoint=PEER_RELATION, local_unit_data={"workload_state": "starting"}
     )
-    state_in = testing.State(leader=True, relations={relation})
+    state = testing.State(leader=True, relations={relation})
     with (
         patch("managers.config.ConfigManager.render_env"),
-        patch("workload.CassandraWorkload.restart") as restart,
-        patch("workload.CassandraWorkload.is_alive"),
-        patch(
-            "managers.cluster.ClusterManager.is_healthy",
-            new_callable=PropertyMock(return_value=False),
-        ),
-    ):
-        state_out = ctx.run(ctx.on.config_changed(), state_in)
-        assert state_out.unit_status == ops.MaintenanceStatus("waiting for Cassandra to start")
-        restart.assert_not_called()
-
-
-def test_collect_unit_status_active_but_not_healthy():
-    """Ensure unit is MaintenanceStatus if workload_state is 'active' but node is not healthy."""
-    ctx = testing.Context(CassandraCharm)
-    relation = testing.PeerRelation(
-        id=1,
-        endpoint=PEER_RELATION,
-        local_unit_data={"workload_state": "active"},
-    )
-    state_in = testing.State(leader=True, relations={relation})
-    with (
-        patch("managers.config.ConfigManager.render_env"),
-        patch("workload.CassandraWorkload.restart"),
-        patch("workload.CassandraWorkload.is_alive"),
-        patch(
-            "managers.cluster.ClusterManager.is_healthy",
-            new_callable=PropertyMock(return_value=False),
-        ),
-    ):
-        state_out = ctx.run(ctx.on.collect_unit_status(), state_in)
-        assert state_out.unit_status == ops.MaintenanceStatus("waiting for Cassandra to start")
-
-
-def test_start_not_leader_and_cluster_state_not_active():
-    """Ensure charm does not start Cassandra if not leader and cluster_state is not 'active'."""
-    ctx = testing.Context(CassandraCharm)
-    relation = testing.PeerRelation(
-        id=1, endpoint=PEER_RELATION, local_app_data={"cluster_state": "pending"}
-    )
-    state_in = testing.State(leader=False, relations={relation})
-    with (
-        patch("managers.config.ConfigManager.render_env"),
-        patch("workload.CassandraWorkload.restart") as restart,
-        patch("workload.CassandraWorkload.is_alive"),
-        patch(
-            "managers.cluster.ClusterManager.is_healthy",
-            new_callable=PropertyMock(return_value=False),
-        ),
+        patch("managers.config.ConfigManager.render_cassandra_config"),
+        patch("charm.CassandraWorkload") as workload,
     ):
-        state_out = ctx.run(ctx.on.start(), state_in)
-        assert state_out.unit_status == ops.MaintenanceStatus("installing Cassandra")
-        restart.assert_not_called()
+        state = ctx.run(ctx.on.config_changed(), state)
+        assert state.unit_status == ops.MaintenanceStatus("waiting for Cassandra to start")
+        workload.return_value.restart.assert_not_called()
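
A note on the new mocking style in these tests: patching charm.CassandraWorkload replaces the class at its point of use, so the instance the charm actually constructs is the mock's return_value, and that is where call assertions such as workload.return_value.start.assert_not_called() must go. A self-contained illustration of that unittest.mock behaviour, using a hypothetical stand-in class rather than the charm's real workload:

# Stand-alone sketch; CassandraWorkloadDemo and build_and_start are
# hypothetical stand-ins, not code from this repository.
from unittest.mock import patch


class CassandraWorkloadDemo:
    def start(self) -> None:
        print("starting the real workload")


def build_and_start() -> None:
    # Mirrors what the charm does: construct the workload, then call start().
    workload = CassandraWorkloadDemo()
    workload.start()


with patch(f"{__name__}.CassandraWorkloadDemo") as workload_cls:
    build_and_start()
    # The object the code under test used is workload_cls.return_value,
    # so assertions about instance methods go through it.
    workload_cls.return_value.start.assert_called_once()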

tests/unit/test_workload.py

Lines changed: 2 additions & 2 deletions

@@ -27,7 +27,7 @@ def test_alive_false_if_service_missing(mock_snap_cache):
     assert workload.is_alive() is False
 
 
-def test_exec_successful_command_returns_output():
+def test_exec_successful_command_returns_output(mock_snap_cache):
     with patch("workload.subprocess.run") as mock_run:
         mock_run.return_value = subprocess.CompletedProcess(
             args=["echo", "hello"], returncode=0, stdout="hello\n", stderr=""
@@ -38,7 +38,7 @@ def test_exec_successful_command_returns_output():
         assert stderr == ""
 
 
-def test_exec_command_raises_on_failure():
+def test_exec_command_raises_on_failure(mock_snap_cache):
     with patch("workload.subprocess.run") as mock_run:
         mock_run.side_effect = subprocess.CalledProcessError(
             returncode=1, cmd=["false"], output="", stderr="error"
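
Both exec tests above now request the mock_snap_cache fixture so that constructing the workload in their setup does not reach snapd. The fixture itself lives in the suite's conftest and is not part of this diff; a rough sketch of what such a fixture could look like, where the patch target workload.snap.SnapCache is an assumed name used only for illustration:

# Hypothetical conftest.py sketch; the real fixture is defined elsewhere in
# the repository, and the patch target below is an assumption, not verified
# against this codebase.
from unittest.mock import MagicMock, patch

import pytest


@pytest.fixture
def mock_snap_cache():
    # Replace the snap cache lookup so instantiating the workload never
    # talks to snapd on the machine running the unit tests.
    with patch("workload.snap.SnapCache") as cache:
        cache.return_value = MagicMock()
        yield cache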
