Skip to content

Commit e8b6f2d

Browse files
authored
update libs (#354)
* update libs * update libs from VM charm * update test_mongos to test the new way of removing users * update lib * update src * wait for mongos charm to remove the user * update tests
1 parent 4c3c9e7 commit e8b6f2d

File tree

4 files changed

+118
-12
lines changed

4 files changed

+118
-12
lines changed

lib/charms/mongodb/v0/config_server_interface.py

Lines changed: 63 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -14,8 +14,15 @@
1414
DatabaseRequestedEvent,
1515
DatabaseRequires,
1616
)
17+
from charms.mongodb.v0.mongo import MongoConnection
1718
from charms.mongodb.v1.mongos import MongosConnection
18-
from ops.charm import CharmBase, EventBase, RelationBrokenEvent, RelationChangedEvent
19+
from ops.charm import (
20+
CharmBase,
21+
EventBase,
22+
RelationBrokenEvent,
23+
RelationChangedEvent,
24+
RelationCreatedEvent,
25+
)
1926
from ops.framework import Object
2027
from ops.model import (
2128
ActiveStatus,
@@ -24,6 +31,7 @@
2431
StatusBase,
2532
WaitingStatus,
2633
)
34+
from pymongo.errors import PyMongoError
2735

2836
from config import Config
2937

@@ -43,16 +51,20 @@
4351

4452
# Increment this PATCH version before using `charmcraft publish-lib` or reset
4553
# to 0 if you are raising the major API version
46-
LIBPATCH = 13
54+
LIBPATCH = 14
4755

4856

4957
class ClusterProvider(Object):
5058
"""Manage relations between the config server and mongos router on the config-server side."""
5159

5260
def __init__(
53-
self, charm: CharmBase, relation_name: str = Config.Relations.CLUSTER_RELATIONS_NAME
61+
self,
62+
charm: CharmBase,
63+
relation_name: str = Config.Relations.CLUSTER_RELATIONS_NAME,
64+
substrate: str = Config.Substrate.VM,
5465
) -> None:
5566
"""Constructor for ShardingProvider object."""
67+
self.substrate = substrate
5668
self.relation_name = relation_name
5769
self.charm = charm
5870
self.database_provides = DatabaseProvides(self.charm, relation_name=self.relation_name)
@@ -179,7 +191,9 @@ def _on_relation_broken(self, event) -> None:
179191
logger.info("Skipping relation broken event, broken event due to scale down")
180192
return
181193

182-
self.charm.client_relations.oversee_users(departed_relation_id, event)
194+
# mongos-k8s router is in charge of removing its own users.
195+
if self.substrate == Config.Substrate.VM:
196+
self.charm.client_relations.oversee_users(departed_relation_id, event)
183197

184198
def update_config_server_db(self, event):
185199
"""Provides related mongos applications with new config server db."""
@@ -244,7 +258,7 @@ def __init__(
244258
super().__init__(charm, self.relation_name)
245259
self.framework.observe(
246260
charm.on[self.relation_name].relation_created,
247-
self.database_requires._on_relation_created_event,
261+
self._on_relation_created_handler,
248262
)
249263

250264
self.framework.observe(
@@ -261,6 +275,11 @@ def __init__(
261275
charm.on[self.relation_name].relation_broken, self._on_relation_broken
262276
)
263277

278+
def _on_relation_created_handler(self, event: RelationCreatedEvent) -> None:
279+
logger.info("Integrating to config-server")
280+
self.charm.status.set_and_share_status(WaitingStatus("Connecting to config-server"))
281+
self.database_requires._on_relation_created_event(event)
282+
264283
def _on_database_created(self, event) -> None:
265284
if self.charm.upgrade_in_progress:
266285
logger.warning(
@@ -303,6 +322,8 @@ def _on_relation_changed(self, event) -> None:
303322

304323
# avoid restarting mongos when possible
305324
if not updated_keyfile and not updated_config and self.is_mongos_running():
325+
# mongos-k8s router must update its users on start
326+
self._update_k8s_users(event)
306327
return
307328

308329
# mongos is not available until it is using new secrets
@@ -321,6 +342,20 @@ def _on_relation_changed(self, event) -> None:
321342
if self.charm.unit.is_leader():
322343
self.charm.mongos_initialised = True
323344

345+
# mongos-k8s router must update its users on start
346+
self._update_k8s_users(event)
347+
348+
def _update_k8s_users(self, event) -> None:
349+
if self.substrate != Config.Substrate.K8S:
350+
return
351+
352+
# K8s can handle its 1:Many users after being initialized
353+
try:
354+
self.charm.client_relations.oversee_users(None, None)
355+
except PyMongoError:
356+
event.defer()
357+
logger.debug("failed to add users on mongos-k8s router, will defer and try again.")
358+
324359
def _on_relation_broken(self, event: RelationBrokenEvent) -> None:
325360
# Only relation_deparated events can check if scaling down
326361
if not self.charm.has_departed_run(event.relation.id):
@@ -334,6 +369,13 @@ def _on_relation_broken(self, event: RelationBrokenEvent) -> None:
334369
logger.info("Skipping relation broken event, broken event due to scale down")
335370
return
336371

372+
try:
373+
self.handle_mongos_k8s_users_removal()
374+
except PyMongoError:
375+
logger.debug("Trouble removing router users, will defer and try again")
376+
event.defer()
377+
return
378+
337379
self.charm.stop_mongos_service()
338380
logger.info("Stopped mongos daemon")
339381

@@ -348,9 +390,24 @@ def _on_relation_broken(self, event: RelationBrokenEvent) -> None:
348390
if self.substrate == Config.Substrate.VM:
349391
self.charm.remove_connection_info()
350392
else:
351-
self.db_initialised = False
393+
self.charm.db_initialised = False
352394

353395
# BEGIN: helper functions
396+
def handle_mongos_k8s_users_removal(self) -> None:
397+
"""Handles the removal of all client mongos-k8s users and the mongos-k8s admin user.
398+
399+
Raises:
400+
PyMongoError
401+
"""
402+
if not self.charm.unit.is_leader() or self.substrate != Config.Substrate.K8S:
403+
return
404+
405+
self.charm.client_relations.remove_all_relational_users()
406+
407+
# now that the client mongos users have been removed we can remove ourselves
408+
with MongoConnection(self.charm.mongo_config) as mongo:
409+
mongo.drop_user(self.charm.mongo_config.username)
410+
354411
def pass_hook_checks(self, event):
355412
"""Runs the pre-hooks checks for ClusterRequirer, returns True if all pass."""
356413
if self.is_mongos_tls_missing():

lib/charms/mongodb/v1/mongodb_provider.py

Lines changed: 37 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -227,6 +227,15 @@ def remove_users(
227227
):
228228
continue
229229

230+
# for user removal of mongos-k8s router, we let the router remove itself
231+
if (
232+
self.charm.is_role(Config.Role.CONFIG_SERVER)
233+
and self.substrate == Config.Substrate.K8S
234+
):
235+
logger.info("K8s routers will remove themselves.")
236+
self._remove_from_relational_users_to_manage(username)
237+
return
238+
230239
mongo.drop_user(username)
231240
self._remove_from_relational_users_to_manage(username)
232241

@@ -514,6 +523,34 @@ def _add_to_relational_users_to_manage(self, user_to_add: str) -> None:
514523
current_users.add(user_to_add)
515524
self._update_relational_users_to_manage(current_users)
516525

526+
def remove_all_relational_users(self):
527+
"""Removes all users from DB.
528+
529+
Raises: PyMongoError.
530+
"""
531+
with MongoConnection(self.charm.mongo_config) as mongo:
532+
database_users = mongo.get_users()
533+
534+
users_being_managed = database_users.intersection(self._get_relational_users_to_manage())
535+
self.remove_users(users_being_managed, expected_current_users=set())
536+
537+
# now we must remove all of their connection info
538+
for relation in self._get_relations():
539+
fields = self.database_provides.fetch_my_relation_data([relation.id])[relation.id]
540+
self.database_provides.delete_relation_data(relation.id, fields=list(fields))
541+
542+
# unfortunately the above doesn't work to remove secrets, so we forcibly remove the
543+
# rest: manually remove the secret before clearing the databag
544+
for unit in relation.units:
545+
secret_id = json.loads(relation.data[unit]["data"])["secret-user"]
546+
# secret id is the same on all units for `secret-user`
547+
break
548+
549+
user_secrets = self.charm.model.get_secret(id=secret_id)
550+
user_secrets.remove_all_revisions()
551+
user_secrets.get_content(refresh=True)
552+
relation.data[self.charm.app].clear()
553+
517554
@staticmethod
518555
def _get_database_from_relation(relation: Relation) -> Optional[str]:
519556
"""Return database name from relation."""

src/charm.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -153,7 +153,7 @@ def __init__(self, *args):
153153

154154
self.shard = ConfigServerRequirer(self)
155155
self.config_server = ShardingProvider(self)
156-
self.cluster = ClusterProvider(self)
156+
self.cluster = ClusterProvider(self, substrate=Config.SUBSTRATE)
157157

158158
self.upgrade = MongoDBUpgrade(self)
159159

tests/integration/sharding_tests/test_mongos.py

Lines changed: 17 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,7 @@
44

55

66
import pytest
7+
import tenacity
78
from pymongo.errors import OperationFailure
89
from pytest_operator.plugin import OpsTest
910

@@ -17,7 +18,9 @@
1718
SHARD_REL_NAME = "sharding"
1819
CLUSTER_REL_NAME = "cluster"
1920
CONFIG_SERVER_REL_NAME = "config-server"
21+
NUMBER_OF_MONGOS_USERS_WHEN_NO_ROUTERS = 3 # operator-user, backup-user, and monitor-user
2022
TIMEOUT = 10 * 60
23+
TWO_MINUTE_TIMEOUT = 2 * 60
2124

2225

2326
@pytest.mark.group(1)
@@ -125,7 +128,11 @@ async def test_connect_to_cluster_creates_user(ops_test: OpsTest) -> None:
125128
@pytest.mark.group(1)
126129
@pytest.mark.abort_on_fail
127130
async def test_disconnect_from_cluster_removes_user(ops_test: OpsTest) -> None:
128-
"""Verifies that when the cluster is formed a the user is removed."""
131+
"""Verifies that when the cluster integration is removed, the client users are removed.
132+
133+
Since mongos-k8s router supports multiple users, we expect that the removal of this
134+
integration will result in the removal of the mongos-k8s admin user and all of its clients.
135+
"""
129136
# generate URI for new mongos user
130137
(username, password) = await get_related_username_password(
131138
ops_test, app_name=MONGOS_APP_NAME, relation_name=CLUSTER_REL_NAME
@@ -135,7 +142,6 @@ async def test_disconnect_from_cluster_removes_user(ops_test: OpsTest) -> None:
135142
mongos_client = await get_direct_mongo_client(
136143
ops_test, app_name=CONFIG_SERVER_APP_NAME, mongos=True
137144
)
138-
num_users = count_users(mongos_client)
139145
await ops_test.model.applications[MONGOS_APP_NAME].remove_relation(
140146
f"{MONGOS_APP_NAME}:cluster",
141147
f"{CONFIG_SERVER_APP_NAME}:cluster",
@@ -148,9 +154,15 @@ async def test_disconnect_from_cluster_removes_user(ops_test: OpsTest) -> None:
148154
)
149155
num_users_after_removal = count_users(mongos_client)
150156

151-
assert (
152-
num_users - 1 == num_users_after_removal
153-
), "Cluster did not remove user after integration removal."
157+
for attempt in tenacity.Retrying(
158+
reraise=True,
159+
stop=tenacity.stop_after_delay(TWO_MINUTE_TIMEOUT),
160+
wait=tenacity.wait_fixed(10),
161+
):
162+
with attempt:
163+
assert (
164+
NUMBER_OF_MONGOS_USERS_WHEN_NO_ROUTERS == num_users_after_removal
165+
), "Cluster did not remove user after integration removal."
154166

155167
mongos_user_client = await get_direct_mongo_client(
156168
ops_test,

0 commit comments

Comments
 (0)