@@ -30,9 +30,11 @@ import (
     . "github.com/onsi/ginkgo/v2"
     . "github.com/onsi/gomega"
     appsv1 "k8s.io/api/apps/v1"
+    corev1 "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/runtime"
     restclient "k8s.io/client-go/rest"
+    "k8s.io/client-go/tools/clientcmd"
     "k8s.io/klog/v2"
     clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
     "sigs.k8s.io/cluster-api/test/framework"
@@ -254,6 +256,7 @@ func setupBootstrapCluster(config *clusterctl.E2EConfig, scheme *runtime.Scheme,
     return clusterProvider, clusterProxy
 }
 
+// logStatusContinuously logs the state of the mgt-cluster and the wl-clusters continuously.
 func logStatusContinuously(ctx context.Context, restConfig *restclient.Config, c client.Client) {
     for {
         select {
@@ -269,33 +272,86 @@ func logStatusContinuously(ctx context.Context, restConfig *restclient.Config, c
     }
 }
 
+// logStatus logs the current state of the mgt-cluster and the wl-clusters once.
+// It is called repeatedly by logStatusContinuously.
 func logStatus(ctx context.Context, restConfig *restclient.Config, c client.Client) error {
-    log("≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡ <<< Start logging status")
+    log("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^")
+    log(fmt.Sprintf("≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡ %s <<< Start logging status", time.Now().Format("2006-01-02 15:04:05")))
 
     if err := logCaphDeployment(ctx, c); err != nil {
         return err
     }
+
     if err := logBareMetalHostStatus(ctx, c); err != nil {
         return err
     }
+
     if err := logHCloudMachineStatus(ctx, c); err != nil {
         return err
     }
-    if err := logConditions(ctx, restConfig); err != nil {
+
+    // Log the unhealthy conditions of the mgt-cluster.
+    if err := logConditions(ctx, "mgt-cluster", restConfig); err != nil {
         return err
     }
-    log("≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡ End logging status >>>")
+
+    // Log the unhealthy conditions of the wl-clusters.
+    clusterList := &clusterv1.ClusterList{}
+    err := c.List(ctx, clusterList)
+    if err != nil {
+        return fmt.Errorf("failed to list clusters: %w", err)
+    }
+
+    for _, cluster := range clusterList.Items {
+        // Get the secret containing the kubeconfig.
+        secretName := cluster.Name + "-kubeconfig"
+        secret := &corev1.Secret{
+            ObjectMeta: metav1.ObjectMeta{
+                Name:      secretName,
+                Namespace: cluster.Namespace,
+            },
+        }
+
+        err := c.Get(ctx, client.ObjectKeyFromObject(secret), secret)
+        if err != nil {
+            log(fmt.Sprintf("Failed to get Secret %s/%s: %v", cluster.Namespace, secretName, err))
+            continue
+        }
+
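+        // Cluster API stores the kubeconfig of the workload cluster under the "value" key of this secret.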
+        data := secret.Data["value"]
+        if len(data) == 0 {
+            log(fmt.Sprintf("Failed to get Secret %s/%s: content is empty", cluster.Namespace, secretName))
+            continue
+        }
+
+        // Create a restConfig from the kubeconfig.
+        restConfig, err := clientcmd.RESTConfigFromKubeConfig(data)
+        if err != nil {
+            log(fmt.Sprintf("Failed to create REST config from Secret %s/%s: %v", cluster.Namespace, secretName, err))
+            continue
+        }
+
+        // Log the conditions of this wl-cluster.
+        err = logConditions(ctx, "wl-cluster "+cluster.Name, restConfig)
+        if err != nil {
+            log(fmt.Sprintf("Failed to log Conditions %s/%s: %v", cluster.Namespace, secretName, err))
+            continue
+        }
+    }
+
+    log(fmt.Sprintf("≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡ %s End logging status >>>", time.Now().Format("2006-01-02 15:04:05")))
 
     return nil
 }
 
-func logConditions(ctx context.Context, restConfig *restclient.Config) error {
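+// logConditions logs the unhealthy conditions of the cluster reachable via restConfig.
+// The clusterName is only used to label the log output.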
+func logConditions(ctx context.Context, clusterName string, restConfig *restclient.Config) error {
     restConfig.QPS = -1 // Since Kubernetes 1.29 "API Priority and Fairness" handles that.
-    counter, err := checkconditions.RunAndGetCounter(ctx, restConfig, checkconditions.Arguments{})
+    counter, err := checkconditions.RunAndGetCounter(ctx, restConfig, &checkconditions.Arguments{})
     if err != nil {
-        return fmt.Errorf("failed to get check conditions: %w", err)
+        return fmt.Errorf("check conditions: %w", err)
     }
-    log(fmt.Sprintf("--------------------------------------------------- Unhealthy Conditions: %d",
+    log(fmt.Sprintf("----------------------------------------------- %s ---- Unhealthy Conditions: %d",
+        clusterName,
         len(counter.Lines)))
 
     for _, line := range counter.Lines {
@@ -417,13 +473,18 @@ func logBareMetalHostStatus(ctx context.Context, c client.Client) error {
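+        // Skip hosts without a provisioning state.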
         if hbmh.Spec.Status.ProvisioningState == "" {
             continue
         }
-        log("BareMetalHost: " + hbmh.Name + " " + fmt.Sprint(hbmh.Spec.ServerID))
-        log("  ProvisioningState: " + string(hbmh.Spec.Status.ProvisioningState))
+
+        // Log info about this hbmh.
+        log("BareMetalHost: " + hbmh.Name + " " + fmt.Sprint(hbmh.Spec.ServerID) +
+            " | IPv4: " + hbmh.Spec.Status.IPv4)
+
+        // Show an error, if set.
         eMsg := string(hbmh.Spec.Status.ErrorType) + " " + hbmh.Spec.Status.ErrorMessage
         eMsg = strings.TrimSpace(eMsg)
         if eMsg != "" {
             log("  Error: " + eMsg)
         }
+
         readyC := conditions.Get(hbmh, clusterv1.ReadyCondition)
         msg := ""
         reason := ""
@@ -433,7 +494,7 @@ func logBareMetalHostStatus(ctx context.Context, c client.Client) error {
             reason = readyC.Reason
             state = string(readyC.Status)
         }
-        log("  Ready Condition: " + state + " " + reason + " " + msg)
+        log("  ProvisioningState: " + string(hbmh.Spec.Status.ProvisioningState) + " | Ready Condition: " + state + " " + reason + " " + msg)
     }
     return nil
 }