@@ -329,7 +329,7 @@ def join_unit_to_cluster(self) -> None:
         cluster_primary = self._get_primary_from_online_peer()
         if not cluster_primary:
             self.unit.status = WaitingStatus("waiting to get cluster primary from peers")
-            logger.debug("waiting: unable to retrieve the cluster primary from peers")
+            logger.info("waiting: unable to retrieve the cluster primary from peers")
             return
 
         if (
@@ -358,7 +358,7 @@ def join_unit_to_cluster(self) -> None:
 
         if self._mysql.are_locks_acquired(from_instance=lock_instance or cluster_primary):
             self.unit.status = WaitingStatus("waiting to join the cluster")
-            logger.debug("waiting: cluster lock is held")
+            logger.info("waiting: cluster lock is held")
             return
 
         self.unit.status = MaintenanceStatus("joining the cluster")
@@ -387,18 +387,17 @@ def join_unit_to_cluster(self) -> None:
                 from_instance=cluster_primary,
                 lock_instance=lock_instance,
             )
-            logger.debug(f"Added instance {instance_address} to cluster")
         except MySQLAddInstanceToClusterError:
-            logger.debug(f"Unable to add instance {instance_address} to cluster.")
+            logger.info(f"Unable to add instance {instance_address} to cluster.")
             return
         except MySQLLockAcquisitionError:
             self.unit.status = WaitingStatus("waiting to join the cluster")
-            logger.debug("waiting: failed to acquire lock when adding instance to cluster")
+            logger.info("waiting: failed to acquire lock when adding instance to cluster")
             return
 
         self.unit_peer_data["member-state"] = "online"
         self.unit.status = ActiveStatus(self.active_status_message)
-        logger.debug(f"Instance {instance_label} is cluster member")
+        logger.info(f"Instance {instance_label} added to cluster")
 
     def _reconcile_pebble_layer(self, container: Container) -> None:
         """Reconcile pebble layer."""
@@ -561,9 +560,9 @@ def _on_leader_elected(self, _) -> None:
             BACKUPS_PASSWORD_KEY,
         ]
 
+        logger.info("Generating internal user credentials")
         for required_password in required_passwords:
             if not self.get_secret("app", required_password):
-                logger.debug(f"Setting {required_password}")
                 self.set_secret(
                     "app", required_password, generate_random_password(PASSWORD_LENGTH)
                 )
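Note: the loop above fills any missing internal credential with generate_random_password(PASSWORD_LENGTH) from the charm library. A minimal standard-library sketch of such a helper, assuming an alphanumeric alphabet (the charm's real implementation may differ):

    import secrets
    import string

    def generate_random_password(length: int) -> str:
        # Cryptographically secure, uniform choice over letters and digits.
        alphabet = string.ascii_letters + string.digits
        return "".join(secrets.choice(alphabet) for _ in range(length))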
@@ -605,20 +604,20 @@ def _configure_instance(self, container) -> None:
         """Configure the instance for use in Group Replication."""
         # Run mysqld for the first time to
         # bootstrap the data directory and users
-        logger.debug("Initializing instance")
+        logger.info("Initializing mysqld")
         try:
             self._mysql.fix_data_dir(container)
             self._mysql.initialise_mysqld()
 
             # Add the pebble layer
-            logger.debug("Adding pebble layer")
+            logger.info("Adding pebble layer")
             container.add_layer(MYSQLD_SAFE_SERVICE, self._pebble_layer, combine=True)
             container.restart(MYSQLD_SAFE_SERVICE)
 
-            logger.debug("Waiting for instance to be ready")
+            logger.info("Waiting for instance to be ready")
             self._mysql.wait_until_mysql_connection(check_port=False)
 
-            logger.info("Configuring instance")
+            logger.info("Configuring initialized mysqld")
             # Configure all base users and revoke privileges from the root users
             self._mysql.configure_mysql_users(password_needed=False)
 
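Note: container.add_layer(MYSQLD_SAFE_SERVICE, self._pebble_layer, combine=True) registers the service definition that the subsequent restart starts. A rough sketch of what such a Pebble layer looks like, assuming the service label is "mysqld_safe" and using an illustrative command rather than the charm's exact definition:

    from ops.pebble import Layer

    layer = Layer({
        "summary": "mysqld layer",
        "services": {
            "mysqld_safe": {
                "override": "replace",
                "summary": "mysqld safe daemon",
                "command": "mysqld_safe",  # illustrative only
                "startup": "enabled",
            },
        },
    })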
@@ -696,7 +695,7 @@ def _on_mysql_pebble_ready(self, event) -> None:
         if self._mysql.is_data_dir_initialised():
             # Data directory is already initialised, skip configuration
             self.unit.status = MaintenanceStatus("Starting mysqld")
-            logger.debug("Data directory is already initialised, skipping configuration")
+            logger.info("Data directory is already initialised, skipping configuration")
             self._reconcile_pebble_layer(container)
             return
 
@@ -743,12 +742,12 @@ def _handle_potential_cluster_crash_scenario(self) -> bool:
         if not self._mysql.is_mysqld_running():
             return True
 
-        only_single_unitialized_node_across_cluster = (
+        only_single_uninitialized_node_across_cluster = (
             self.only_one_cluster_node_thats_uninitialized
         )
 
         if (
-            not self.cluster_initialized and not only_single_unitialized_node_across_cluster
+            not self.cluster_initialized and not only_single_uninitialized_node_across_cluster
         ) or not self.unit_peer_data.get("member-role"):
             return True
 
@@ -783,19 +782,19 @@ def _handle_potential_cluster_crash_scenario(self) -> bool:
         # Add state 'offline' for this unit (self.peers.unit does not
         # include this unit)
         if (all_states | {"offline"} == {"offline"} and self.unit.is_leader()) or (
-            only_single_unitialized_node_across_cluster and all_states == {"waiting"}
+            only_single_uninitialized_node_across_cluster and all_states == {"waiting"}
         ):
             # All instance are off, reboot cluster from outage from the leader unit
 
             logger.info("Attempting reboot from complete outage.")
             try:
                 # Need condition to avoid rebooting on all units of application
-                if self.unit.is_leader() or only_single_unitialized_node_across_cluster:
+                if self.unit.is_leader() or only_single_uninitialized_node_across_cluster:
                     self._mysql.reboot_from_complete_outage()
             except MySQLRebootFromCompleteOutageError:
                 logger.error("Failed to reboot cluster from complete outage.")
 
-            if only_single_unitialized_node_across_cluster and all_states == {"waiting"}:
+            if only_single_uninitialized_node_across_cluster and all_states == {"waiting"}:
                 self._mysql.drop_group_replication_metadata_schema()
                 self.create_cluster()
                 self.unit.status = ActiveStatus(self.active_status_message)
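Note: the outage check above uses a set-union trick: all_states holds the member-state of every peer unit (the current unit excluded), so union-ing in "offline" stands in for this unit, and the comparison holds only when no peer reports any other state. A minimal standalone sketch of that check:

    def cluster_fully_offline(peer_states: set) -> bool:
        # peer_states excludes the current unit; treating this unit as
        # "offline" makes the union collapse to {"offline"} only when
        # every peer is also offline.
        return peer_states | {"offline"} == {"offline"}

    assert cluster_fully_offline(set())                      # sole unit
    assert cluster_fully_offline({"offline"})                # all peers down
    assert not cluster_fully_offline({"offline", "online"})  # a peer is still up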
@@ -826,8 +825,7 @@ def _is_cluster_blocked(self) -> bool:
 
         if not member_state or member_state == "restarting":
             # avoid changing status while tls is being set up or charm is being initialized
-            logger.info("Unit is waiting or restarting")
-            logger.debug(f"{member_state=}")
+            logger.info(f"Unit {member_state=}")
             return True
 
         # avoid changing status while async replication is setting up
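Note: the merged message relies on Python 3.8+ self-documenting f-strings: {member_state=} expands to the expression text plus its repr, so the single logger.info call now carries what the removed info/debug pair logged separately. For example:

    member_state = "restarting"
    print(f"Unit {member_state=}")  # prints: Unit member_state='restarting'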
@@ -837,7 +835,7 @@ def _on_update_status(self, _: Optional[UpdateStatusEvent]) -> None:
         """Handle the update status event."""
         if not self.upgrade.idle:
             # avoid changing status while upgrade is in progress
-            logger.debug("Application is upgrading. Skipping.")
+            logger.info("Application is upgrading. Skipping.")
             return
         if not self.unit.is_leader() and self._is_unit_waiting_to_join_cluster():
             # join cluster test takes precedence over blocked test
@@ -851,7 +849,7 @@ def _on_update_status(self, _: Optional[UpdateStatusEvent]) -> None:
 
         container = self.unit.get_container(CONTAINER_NAME)
         if not container.can_connect():
-            logger.debug("Cannot connect to pebble in the mysql container")
+            logger.info("Cannot connect to pebble in the mysql container")
             return
 
         if self._handle_potential_cluster_crash_scenario():