From 0c0dfe8f00daf575111d73b9b7488e443bf0e9ec Mon Sep 17 00:00:00 2001
From: makhov
Date: Fri, 7 Mar 2025 14:13:24 +0200
Subject: [PATCH] Publishing events for observability

Signed-off-by: Alexey Makhov
Signed-off-by: makhov
---
 cmd/main.go                                           | 7 ++++++-
 config/rbac/role.yaml                                 | 7 +++++++
 .../controlplane/k0s_controlplane_controller.go       | 9 +++++++++
 .../controlplane/k0s_controlplane_controller_test.go  | 3 ++-
 .../controlplane/k0smotron_controlplane_controller.go | 6 ++++++
 .../infrastructure/remote_machine_controller.go       | 7 +++++++
 6 files changed, 37 insertions(+), 2 deletions(-)

diff --git a/cmd/main.go b/cmd/main.go
index 52aa98af8..ead092b10 100644
--- a/cmd/main.go
+++ b/cmd/main.go
@@ -155,6 +155,8 @@ func main() {
 		os.Exit(1)
 	}
 
+	recorder := mgr.GetEventRecorderFor("k0smotron")
+
 	restConfig, err := loadRestConfig()
 	if err != nil {
 		setupLog.Error(err, "unable to get cluster config")
 		os.Exit(1)
 	}
@@ -203,7 +205,7 @@
 		Scheme:     mgr.GetScheme(),
 		ClientSet:  clientSet,
 		RESTConfig: restConfig,
-		Recorder:   mgr.GetEventRecorderFor("cluster-reconciler"),
+		Recorder:   recorder,
 	}).SetupWithManager(mgr); err != nil {
 		setupLog.Error(err, "unable to create controller", "controller", "K0smotronCluster")
 		os.Exit(1)
 	}
@@ -224,6 +226,7 @@
 		Scheme:     mgr.GetScheme(),
 		ClientSet:  clientSet,
 		RESTConfig: restConfig,
+		Recorder:   recorder,
 	}).SetupWithManager(mgr); err != nil {
 		setupLog.Error(err, "unable to create controller", "controller", "K0smotronControlPlane")
 		os.Exit(1)
 	}
@@ -233,6 +236,7 @@
 		Client:     mgr.GetClient(),
 		ClientSet:  clientSet,
 		RESTConfig: restConfig,
+		Recorder:   recorder,
 	}).SetupWithManager(mgr); err != nil {
 		setupLog.Error(err, "unable to create controller", "controller", "K0sController")
 		os.Exit(1)
 	}
@@ -245,6 +249,7 @@
 		Scheme:     mgr.GetScheme(),
 		ClientSet:  clientSet,
 		RESTConfig: restConfig,
+		Recorder:   recorder,
 	}).SetupWithManager(mgr); err != nil {
 		setupLog.Error(err, "unable to create controller", "controller", "RemoteMachine")
 		os.Exit(1)
 	}
diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml
index 906b6dad6..f17ac9f52 100644
--- a/config/rbac/role.yaml
+++ b/config/rbac/role.yaml
@@ -209,6 +209,13 @@ rules:
   - patch
   - update
   - watch
+- apiGroups:
+  - ""
+  resources:
+  - events
+  verbs:
+  - create
+  - patch
 - apiGroups:
   - ""
   resources:
diff --git a/internal/controller/controlplane/k0s_controlplane_controller.go b/internal/controller/controlplane/k0s_controlplane_controller.go
index 6a7fe8950..745cf83f0 100644
--- a/internal/controller/controlplane/k0s_controlplane_controller.go
+++ b/internal/controller/controlplane/k0s_controlplane_controller.go
@@ -26,6 +26,7 @@ import (
 	"k8s.io/apimachinery/pkg/api/meta"
 	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/client-go/tools/record"
 
 	"github.com/go-logr/logr"
 	"github.com/google/uuid"
@@ -77,6 +78,7 @@ type K0sController struct {
 	client.Client
 	ClientSet  *kubernetes.Clientset
 	RESTConfig *rest.Config
+	Recorder   record.EventRecorder
 	// workloadClusterKubeClient is used during testing to inject a fake client
 	workloadClusterKubeClient *kubernetes.Clientset
 }
@@ -86,6 +88,7 @@
 // +kubebuilder:rbac:groups=core,resources=nodes,verbs=get;list
 // +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=*,verbs=get;list;watch;create;update;patch;delete
 // +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=clusters;clusters/status,verbs=get;list;watch;update;patch
+// +kubebuilder:rbac:groups=core,resources=events,verbs=create;patch
 func (c *K0sController) Reconcile(ctx context.Context, req ctrl.Request) (res ctrl.Result, err error) {
 	log := log.FromContext(ctx).WithValues("controlplane", req.NamespacedName)
@@ -173,6 +176,11 @@ func (c *K0sController) Reconcile(ctx context.Context, req ctrl.Request) (res ct
 		if perr := c.Client.Patch(ctx, cluster, client.Merge); perr != nil {
 			err = fmt.Errorf("failed to patch cluster: %w", perr)
 		}
+
+		// If the control plane became ready, emit an event
+		if existingStatus.Ready != kcp.Status.Ready {
+			c.Recorder.Eventf(kcp, corev1.EventTypeNormal, "ControlPlaneReady", "K0sControlPlane %s is ready", kcp.Name)
+		}
 	}
 
 	// Requeue the reconciliation if the status is not ready
@@ -236,6 +244,7 @@ func (c *K0sController) reconcileKubeconfig(ctx context.Context, cluster *cluste
 				logger.Error(err, "Failed to regenerate kubeconfig")
 				return
 			}
+			c.Recorder.Eventf(kcp, corev1.EventTypeNormal, "Provisioning", "Kubeconfig for control plane %s regenerated", kcp.Name)
 		}
 	}
 }()
diff --git a/internal/controller/controlplane/k0s_controlplane_controller_test.go b/internal/controller/controlplane/k0s_controlplane_controller_test.go
index 467ee34b9..07e43d0ec 100644
--- a/internal/controller/controlplane/k0s_controlplane_controller_test.go
+++ b/internal/controller/controlplane/k0s_controlplane_controller_test.go
@@ -710,7 +710,8 @@ func TestReconcileKubeconfigCertsRotation(t *testing.T) {
 	require.NoError(t, cc.LookupOrGenerate(ctx, testEnv, capiutil.ObjectKey(cluster), *metav1.NewControllerRef(kcp, cpv1beta1.GroupVersion.WithKind("K0sControlPlane"))))
 
 	r := &K0sController{
-		Client: testEnv,
+		Client:   testEnv,
+		Recorder: testEnv.GetEventRecorderFor("k0smotron"),
 	}
 	err = r.reconcileKubeconfig(ctx, cluster, kcp)
 	require.NoError(t, err)
diff --git a/internal/controller/controlplane/k0smotron_controlplane_controller.go b/internal/controller/controlplane/k0smotron_controlplane_controller.go
index bdea01e21..ae82bf566 100644
--- a/internal/controller/controlplane/k0smotron_controlplane_controller.go
+++ b/internal/controller/controlplane/k0smotron_controlplane_controller.go
@@ -31,6 +31,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/rest"
+	"k8s.io/client-go/tools/record"
 	"sigs.k8s.io/cluster-api/controllers/remote"
 
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -57,6 +58,7 @@ type K0smotronController struct {
 	Scheme     *runtime.Scheme
 	ClientSet  *kubernetes.Clientset
 	RESTConfig *rest.Config
+	Recorder   record.EventRecorder
 }
 
 type Scope struct {
@@ -167,6 +169,8 @@ func (c *K0smotronController) Reconcile(ctx context.Context, req ctrl.Request) (
 		}
 	}
 
+	c.Recorder.Eventf(kcp, corev1.EventTypeNormal, "Reconciled", "K0smotronControlPlane reconciled")
+
 	// TODO: We need to have bit more detailed status and conditions handling
 	kcp.Status.Ready = ready
 	kcp.Status.ExternalManagedControlPlane = true
@@ -315,6 +319,8 @@ func (c *K0smotronController) reconcile(ctx context.Context, cluster *clusterv1.
 		return ctrl.Result{}, foundCluster.Status.Ready, nil
 	}
 
+	c.Recorder.Event(kcp, corev1.EventTypeNormal, "Reconciling", "Reconciling K0smotronControlPlane")
+
 	return ctrl.Result{}, false, err
 }
diff --git a/internal/controller/infrastructure/remote_machine_controller.go b/internal/controller/infrastructure/remote_machine_controller.go
index 681ba5e90..b77553429 100644
--- a/internal/controller/infrastructure/remote_machine_controller.go
+++ b/internal/controller/infrastructure/remote_machine_controller.go
@@ -19,6 +19,7 @@ package infrastructure
 import (
 	"context"
 	"fmt"
+	"k8s.io/client-go/tools/record"
 
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/runtime"
@@ -49,6 +50,7 @@ type RemoteMachineController struct {
 	Scheme     *runtime.Scheme
 	ClientSet  *kubernetes.Clientset
 	RESTConfig *rest.Config
+	Recorder   record.EventRecorder
 }
 
 type RemoteMachineMode int
@@ -314,6 +316,8 @@ func (r *RemoteMachineController) reservePooledMachine(ctx context.Context, rm *
 		}
 	}
 
+	r.Recorder.Eventf(rm, v1.EventTypeNormal, "Reserved", "Reserved machine %s from pool %s", foundPooledMachine.Name, foundPooledMachine.Spec.Pool)
+
 	rm.Spec.Address = foundPooledMachine.Spec.Machine.Address
 	rm.Spec.Port = foundPooledMachine.Spec.Machine.Port
 	rm.Spec.User = foundPooledMachine.Spec.Machine.User
@@ -347,6 +351,9 @@ func (r *RemoteMachineController) returnMachineToPool(ctx context.Context, rm *i
 		if err := r.Status().Update(ctx, &pooledMachine); err != nil {
 			return fmt.Errorf("failed to update pooled machine: %w", err)
 		}
+
+		r.Recorder.Eventf(rm, v1.EventTypeNormal, "Returned", "Returned machine %s to pool %s", pooledMachine.Name, pool)
+
 		return nil
 	}
 }
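
Taken together, the patch follows the usual controller-runtime recipe: create one named record.EventRecorder from the manager, inject it into each reconciler as a struct field, grant the controller create/patch on events, and call Event/Eventf against the object being reconciled. Below is a minimal, self-contained sketch of that wiring, assuming a stock controller-runtime project; DemoReconciler and its Pod target are hypothetical stand-ins, not k0smotron code, while GetEventRecorderFor, record.EventRecorder, and Eventf are the same APIs the patch uses.

// eventdemo.go - illustrative sketch, not part of the patch
package main

import (
	"context"
	"os"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/tools/record"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// DemoReconciler holds the recorder as a struct field, injected once in main,
// mirroring how the patch threads a single recorder through every controller.
type DemoReconciler struct {
	client.Client
	Recorder record.EventRecorder
}

func (r *DemoReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	pod := &corev1.Pod{}
	if err := r.Get(ctx, req.NamespacedName, pod); err != nil {
		return ctrl.Result{}, client.IgnoreNotFound(err)
	}
	// The event is attached to the object passed as the first argument, so it
	// surfaces under "kubectl describe" for that object and in "kubectl get events".
	r.Recorder.Eventf(pod, corev1.EventTypeNormal, "Reconciled", "Pod %s reconciled", pod.Name)
	return ctrl.Result{}, nil
}

func (r *DemoReconciler) SetupWithManager(mgr ctrl.Manager) error {
	return ctrl.NewControllerManagedBy(mgr).For(&corev1.Pod{}).Complete(r)
}

func main() {
	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{})
	if err != nil {
		os.Exit(1)
	}

	// One named recorder shared across controllers, as the patch does with
	// mgr.GetEventRecorderFor("k0smotron"); the name becomes the events' source.
	recorder := mgr.GetEventRecorderFor("k0smotron")

	if err := (&DemoReconciler{Client: mgr.GetClient(), Recorder: recorder}).SetupWithManager(mgr); err != nil {
		os.Exit(1)
	}

	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
		os.Exit(1)
	}
}

Emitting through a recorder is fire-and-forget: Event and Eventf return nothing, and client-go aggregates repeated events, so the calls are cheap on hot reconcile paths. The RBAC hunk is the other half of the wiring: without create and patch on events, the API server rejects the recorder's writes (the controller keeps running, but no events appear). Once deployed, the new events should show up via "kubectl describe" on the involved objects and via "kubectl get events --field-selector involvedObject.kind=K0sControlPlane".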