Skip to content

Commit 8327d5e

Browse files
authored
increased key logs verbosity (s/debug/info/) (#513)
1 parent 1888fd3 commit 8327d5e

File tree

2 files changed

+21
-23
lines changed

2 files changed

+21
-23
lines changed

src/charm.py

Lines changed: 19 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -329,7 +329,7 @@ def join_unit_to_cluster(self) -> None:
329329
cluster_primary = self._get_primary_from_online_peer()
330330
if not cluster_primary:
331331
self.unit.status = WaitingStatus("waiting to get cluster primary from peers")
332-
logger.debug("waiting: unable to retrieve the cluster primary from peers")
332+
logger.info("waiting: unable to retrieve the cluster primary from peers")
333333
return
334334

335335
if (
@@ -358,7 +358,7 @@ def join_unit_to_cluster(self) -> None:
358358

359359
if self._mysql.are_locks_acquired(from_instance=lock_instance or cluster_primary):
360360
self.unit.status = WaitingStatus("waiting to join the cluster")
361-
logger.debug("waiting: cluster lock is held")
361+
logger.info("waiting: cluster lock is held")
362362
return
363363

364364
self.unit.status = MaintenanceStatus("joining the cluster")
@@ -387,18 +387,17 @@ def join_unit_to_cluster(self) -> None:
387387
from_instance=cluster_primary,
388388
lock_instance=lock_instance,
389389
)
390-
logger.debug(f"Added instance {instance_address} to cluster")
391390
except MySQLAddInstanceToClusterError:
392-
logger.debug(f"Unable to add instance {instance_address} to cluster.")
391+
logger.info(f"Unable to add instance {instance_address} to cluster.")
393392
return
394393
except MySQLLockAcquisitionError:
395394
self.unit.status = WaitingStatus("waiting to join the cluster")
396-
logger.debug("waiting: failed to acquire lock when adding instance to cluster")
395+
logger.info("waiting: failed to acquire lock when adding instance to cluster")
397396
return
398397

399398
self.unit_peer_data["member-state"] = "online"
400399
self.unit.status = ActiveStatus(self.active_status_message)
401-
logger.debug(f"Instance {instance_label} is cluster member")
400+
logger.info(f"Instance {instance_label} added to cluster")
402401

403402
def _reconcile_pebble_layer(self, container: Container) -> None:
404403
"""Reconcile pebble layer."""
@@ -561,9 +560,9 @@ def _on_leader_elected(self, _) -> None:
561560
BACKUPS_PASSWORD_KEY,
562561
]
563562

563+
logger.info("Generating internal user credentials")
564564
for required_password in required_passwords:
565565
if not self.get_secret("app", required_password):
566-
logger.debug(f"Setting {required_password}")
567566
self.set_secret(
568567
"app", required_password, generate_random_password(PASSWORD_LENGTH)
569568
)
@@ -605,20 +604,20 @@ def _configure_instance(self, container) -> None:
605604
"""Configure the instance for use in Group Replication."""
606605
# Run mysqld for the first time to
607606
# bootstrap the data directory and users
608-
logger.debug("Initializing instance")
607+
logger.info("Initializing mysqld")
609608
try:
610609
self._mysql.fix_data_dir(container)
611610
self._mysql.initialise_mysqld()
612611

613612
# Add the pebble layer
614-
logger.debug("Adding pebble layer")
613+
logger.info("Adding pebble layer")
615614
container.add_layer(MYSQLD_SAFE_SERVICE, self._pebble_layer, combine=True)
616615
container.restart(MYSQLD_SAFE_SERVICE)
617616

618-
logger.debug("Waiting for instance to be ready")
617+
logger.info("Waiting for instance to be ready")
619618
self._mysql.wait_until_mysql_connection(check_port=False)
620619

621-
logger.info("Configuring instance")
620+
logger.info("Configuring initialized mysqld")
622621
# Configure all base users and revoke privileges from the root users
623622
self._mysql.configure_mysql_users(password_needed=False)
624623

@@ -696,7 +695,7 @@ def _on_mysql_pebble_ready(self, event) -> None:
696695
if self._mysql.is_data_dir_initialised():
697696
# Data directory is already initialised, skip configuration
698697
self.unit.status = MaintenanceStatus("Starting mysqld")
699-
logger.debug("Data directory is already initialised, skipping configuration")
698+
logger.info("Data directory is already initialised, skipping configuration")
700699
self._reconcile_pebble_layer(container)
701700
return
702701

@@ -743,12 +742,12 @@ def _handle_potential_cluster_crash_scenario(self) -> bool:
743742
if not self._mysql.is_mysqld_running():
744743
return True
745744

746-
only_single_unitialized_node_across_cluster = (
745+
only_single_uninitialized_node_across_cluster = (
747746
self.only_one_cluster_node_thats_uninitialized
748747
)
749748

750749
if (
751-
not self.cluster_initialized and not only_single_unitialized_node_across_cluster
750+
not self.cluster_initialized and not only_single_uninitialized_node_across_cluster
752751
) or not self.unit_peer_data.get("member-role"):
753752
return True
754753

@@ -783,19 +782,19 @@ def _handle_potential_cluster_crash_scenario(self) -> bool:
783782
# Add state 'offline' for this unit (self.peers.unit does not
784783
# include this unit)
785784
if (all_states | {"offline"} == {"offline"} and self.unit.is_leader()) or (
786-
only_single_unitialized_node_across_cluster and all_states == {"waiting"}
785+
only_single_uninitialized_node_across_cluster and all_states == {"waiting"}
787786
):
788787
# All instances are off; reboot cluster from outage via the leader unit
789788

790789
logger.info("Attempting reboot from complete outage.")
791790
try:
792791
# Need condition to avoid rebooting on all units of application
793-
if self.unit.is_leader() or only_single_unitialized_node_across_cluster:
792+
if self.unit.is_leader() or only_single_uninitialized_node_across_cluster:
794793
self._mysql.reboot_from_complete_outage()
795794
except MySQLRebootFromCompleteOutageError:
796795
logger.error("Failed to reboot cluster from complete outage.")
797796

798-
if only_single_unitialized_node_across_cluster and all_states == {"waiting"}:
797+
if only_single_uninitialized_node_across_cluster and all_states == {"waiting"}:
799798
self._mysql.drop_group_replication_metadata_schema()
800799
self.create_cluster()
801800
self.unit.status = ActiveStatus(self.active_status_message)
@@ -826,8 +825,7 @@ def _is_cluster_blocked(self) -> bool:
826825

827826
if not member_state or member_state == "restarting":
828827
# avoid changing status while tls is being set up or charm is being initialized
829-
logger.info("Unit is waiting or restarting")
830-
logger.debug(f"{member_state=}")
828+
logger.info(f"Unit {member_state=}")
831829
return True
832830

833831
# avoid changing status while async replication is setting up
@@ -837,7 +835,7 @@ def _on_update_status(self, _: Optional[UpdateStatusEvent]) -> None:
837835
"""Handle the update status event."""
838836
if not self.upgrade.idle:
839837
# avoid changing status while upgrade is in progress
840-
logger.debug("Application is upgrading. Skipping.")
838+
logger.info("Application is upgrading. Skipping.")
841839
return
842840
if not self.unit.is_leader() and self._is_unit_waiting_to_join_cluster():
843841
# join cluster test takes precedence over blocked test
@@ -851,7 +849,7 @@ def _on_update_status(self, _: Optional[UpdateStatusEvent]) -> None:
851849

852850
container = self.unit.get_container(CONTAINER_NAME)
853851
if not container.can_connect():
854-
logger.debug("Cannot connect to pebble in the mysql container")
852+
logger.info("Cannot connect to pebble in the mysql container")
855853
return
856854

857855
if self._handle_potential_cluster_crash_scenario():

src/upgrade.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -270,7 +270,7 @@ def _on_pebble_ready(self, event) -> None:
270270
self._complete_upgrade()
271271

272272
def _recover_multi_unit_cluster(self) -> None:
273-
logger.debug("Recovering unit")
273+
logger.info("Recovering unit")
274274
try:
275275
for attempt in Retrying(
276276
stop=stop_after_attempt(RECOVER_ATTEMPTS), wait=wait_fixed(10)
@@ -332,7 +332,7 @@ def _check_server_upgradeability(self) -> None:
332332
return
333333
instance = getfqdn(self.charm.get_unit_hostname(f"{self.charm.app.name}/0"))
334334
self.charm._mysql.verify_server_upgradable(instance=instance)
335-
logger.debug("MySQL server is upgradeable")
335+
logger.info("Check MySQL server upgradeability passed")
336336

337337
def _check_server_unsupported_downgrade(self) -> bool:
338338
"""Check error log for unsupported downgrade.

0 commit comments

Comments
 (0)