
Commit 51d12fa

operator: various sidecar fixes
Prior to this commit the operator sidecar's decommissioner and pvcunbinder
controllers did not work. This was due to:

- RBAC issues: the sidecar did not correctly scope itself to a single namespace.
- Incorrect label selectors hidden within the controllers in question.

Additionally, the statefulset decommissioner's sole test case has been disabled
for quite some time, so there has been zero test coverage of this functionality.

This commit:

- Restores the decommissioner's tests to a working state.
- Strips out the "fetcher" to reduce duplication and remove the reliance on
  fetching live helm values.
- Replaces baked-in filtering with a label selector argument that will be
  constructed by the helm chart.

A follow-up commit with chart changes and acceptance tests will be submitted.
It has been kept separate to ease the process of backporting to the v2.x.x
branches.

(cherry picked from commit 03dd394)

# Conflicts:
#	operator/cmd/run/run.go
#	operator/go.mod
#	pkg/go.mod
1 parent 0d1eed3 commit 51d12fa

14 files changed: 384 additions & 638 deletions
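The label selector argument called out in the commit message is carried by the `LabelSelectorValue` flag type that this commit relocates into `pkg/pflagutil` (see the run.go diff below). Here is a minimal sketch of how such a flag is registered and fed a chart-rendered selector; the flag name `--unbinder-selector` and the selector string are illustrative assumptions, only the value type itself comes from this change:

```go
package main

import (
    "fmt"

    "github.com/spf13/pflag"
    "k8s.io/apimachinery/pkg/labels"
)

// LabelSelectorValue mirrors the flag type this commit moves into pkg/pflagutil:
// a pflag.Value that parses a Kubernetes label-selector string via labels.Parse.
type LabelSelectorValue struct {
    Selector labels.Selector
}

var _ pflag.Value = (*LabelSelectorValue)(nil)

func (s *LabelSelectorValue) Set(value string) error {
    if value == "" {
        return nil
    }
    var err error
    s.Selector, err = labels.Parse(value)
    return err
}

func (s *LabelSelectorValue) String() string {
    if s.Selector == nil {
        return ""
    }
    return s.Selector.String()
}

func (s *LabelSelectorValue) Type() string { return "label selector" }

func main() {
    var unbinderSelector LabelSelectorValue

    fs := pflag.NewFlagSet("sidecar", pflag.ExitOnError)
    // Hypothetical flag name; the Helm chart would render the selector string from its values.
    fs.Var(&unbinderSelector, "unbinder-selector", "label selector limiting which objects the sidecar controllers act on")

    _ = fs.Parse([]string{"--unbinder-selector=app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=my-cluster"})
    fmt.Println(unbinderSelector.Selector.String()) // echoes the normalized selector
}
```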


operator/cmd/run/run.go

Lines changed: 9 additions & 151 deletions
@@ -17,26 +17,18 @@ import (
     "fmt"
     "os"
     "path/filepath"
-    "slices"
     "strings"
     "time"

     "github.com/cockroachdb/errors"
     "github.com/spf13/cobra"
-    "github.com/spf13/pflag"
     helmkube "helm.sh/helm/v3/pkg/kube"
-    appsv1 "k8s.io/api/apps/v1"
     corev1 "k8s.io/api/core/v1"
-    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/labels"
-    "k8s.io/apimachinery/pkg/types"
     _ "k8s.io/client-go/plugin/pkg/client/auth"
-    "k8s.io/utils/ptr"
     ctrl "sigs.k8s.io/controller-runtime"
     "sigs.k8s.io/controller-runtime/pkg/cache"
     "sigs.k8s.io/controller-runtime/pkg/certwatcher"
-    "sigs.k8s.io/controller-runtime/pkg/client"
-    kubeClient "sigs.k8s.io/controller-runtime/pkg/client"
     "sigs.k8s.io/controller-runtime/pkg/healthz"
     "sigs.k8s.io/controller-runtime/pkg/metrics/filters"
     metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
@@ -57,10 +49,10 @@ import (
     adminutils "github.com/redpanda-data/redpanda-operator/operator/pkg/admin"
     internalclient "github.com/redpanda-data/redpanda-operator/operator/pkg/client"
     consolepkg "github.com/redpanda-data/redpanda-operator/operator/pkg/console"
-    pkglabels "github.com/redpanda-data/redpanda-operator/operator/pkg/labels"
     "github.com/redpanda-data/redpanda-operator/operator/pkg/resources"
     pkgsecrets "github.com/redpanda-data/redpanda-operator/operator/pkg/secrets"
     redpandawebhooks "github.com/redpanda-data/redpanda-operator/operator/webhooks/redpanda"
+    "github.com/redpanda-data/redpanda-operator/pkg/pflagutil"
 )

 type RedpandaController string
@@ -90,32 +82,6 @@ var availableControllers = []string{
     DecommissionController.toString(),
 }

-type LabelSelectorValue struct {
-    Selector labels.Selector
-}
-
-var _ pflag.Value = ((*LabelSelectorValue)(nil))
-
-func (s *LabelSelectorValue) Set(value string) error {
-    if value == "" {
-        return nil
-    }
-    var err error
-    s.Selector, err = labels.Parse(value)
-    return err
-}
-
-func (s *LabelSelectorValue) String() string {
-    if s.Selector == nil {
-        return ""
-    }
-    return s.Selector.String()
-}
-
-func (s *LabelSelectorValue) Type() string {
-    return "label selector"
-}
-
 // Metrics RBAC permissions
 // +kubebuilder:rbac:groups=authentication.k8s.io,resources=tokenreviews,verbs=create;
 // +kubebuilder:rbac:groups=authorization.k8s.io,resources=subjectaccessreviews,verbs=create;
@@ -148,7 +114,7 @@ func Command() *cobra.Command {
         operatorMode     bool
         ghostbuster      bool
         unbindPVCsAfter  time.Duration
-        unbinderSelector LabelSelectorValue
+        unbinderSelector pflagutil.LabelSelectorValue
         allowPVRebinding bool
         autoDeletePVCs   bool
         webhookCertPath  string
@@ -295,21 +261,6 @@ func Command() *cobra.Command {
     return cmd
 }

-type v1Fetcher struct {
-    client kubeClient.Client
-}
-
-func (f *v1Fetcher) FetchLatest(ctx context.Context, name, namespace string) (any, error) {
-    var vectorizedCluster vectorizedv1alpha1.Cluster
-    if err := f.client.Get(ctx, types.NamespacedName{
-        Name:      name,
-        Namespace: namespace,
-    }, &vectorizedCluster); err != nil {
-        return nil, err
-    }
-    return &vectorizedCluster, nil
-}
-
 //nolint:funlen,gocyclo // length looks good
 func Run(
     ctx context.Context,
@@ -704,7 +655,11 @@ func Run(
     }

     if enableGhostBrokerDecommissioner {
-        d := decommissioning.NewStatefulSetDecommissioner(mgr, &v1Fetcher{client: mgr.GetClient()},
+        factory := internalclient.NewFactory(mgr.GetConfig(), mgr.GetClient()).WithAdminClientTimeout(rpClientTimeout)
+        adapter := vectorizedDecommissionerAdapter{factory: factory, client: mgr.GetClient()}
+        d := decommissioning.NewStatefulSetDecommissioner(
+            mgr,
+            adapter.getAdminClient,
             decommissioning.WithSyncPeriod(ghostBrokerDecommissionerSyncPeriod),
             decommissioning.WithCleanupPVCs(false),
             // In Operator v1, decommissioning based on pod ordinal is not correct because
@@ -714,105 +669,8 @@ func Run(
             decommissioning.WithDecommisionOnTooHighOrdinal(false),
             // Operator v1 supports multiple NodePools, and therefore multiple STS.
             // This function provides a custom replica count: the desired replicas of all STS, instead of a single STS.
-            decommissioning.WithDesiredReplicasFetcher(func(ctx context.Context, sts *appsv1.StatefulSet) (int32, error) {
-                // Get Cluster CR, so we can then find its StatefulSets for a full count of desired replicas.
-                idx := slices.IndexFunc(
-                    sts.OwnerReferences,
-                    func(ownerRef metav1.OwnerReference) bool {
-                        return ownerRef.APIVersion == vectorizedv1alpha1.GroupVersion.String() && ownerRef.Kind == "Cluster"
-                    })
-                if idx == -1 {
-                    return 0, nil
-                }
-
-                var vectorizedCluster vectorizedv1alpha1.Cluster
-                if err := mgr.GetClient().Get(ctx, types.NamespacedName{
-                    Name:      sts.OwnerReferences[idx].Name,
-                    Namespace: sts.Namespace,
-                }, &vectorizedCluster); err != nil {
-                    return 0, fmt.Errorf("could not get Cluster: %w", err)
-                }
-
-                // We assume the cluster is fine and synced, checks have been performed in the filter already.
-
-                // Get all nodepool-sts for this Cluster
-                var stsList appsv1.StatefulSetList
-                err := mgr.GetClient().List(ctx, &stsList, &client.ListOptions{
-                    LabelSelector: pkglabels.ForCluster(&vectorizedCluster).AsClientSelector(),
-                })
-                if err != nil {
-                    return 0, fmt.Errorf("failed to list statefulsets of Cluster: %w", err)
-                }
-
-                if len(stsList.Items) == 0 {
-                    return 0, errors.New("found 0 StatefulSets for this Cluster")
-                }
-
-                var allReplicas int32
-                for _, sts := range stsList.Items {
-                    allReplicas += ptr.Deref(sts.Spec.Replicas, 0)
-                }
-
-                // Should not happen, but if it actually happens, we don't want to run ghost broker decommissioner.
-                if allReplicas < 3 {
-                    return 0, fmt.Errorf("found %d desiredReplicas, but want >= 3", allReplicas)
-                }
-
-                if allReplicas != vectorizedCluster.Status.CurrentReplicas || allReplicas != vectorizedCluster.Status.Replicas {
-                    return 0, fmt.Errorf("replicas not synced. status.currentReplicas=%d,status.replicas=%d,allReplicas=%d", vectorizedCluster.Status.CurrentReplicas, vectorizedCluster.Status.Replicas, allReplicas)
-                }
-
-                return allReplicas, nil
-            }),
-            decommissioning.WithFactory(internalclient.NewFactory(mgr.GetConfig(), mgr.GetClient())),
-            decommissioning.WithFilter(func(ctx context.Context, sts *appsv1.StatefulSet) (bool, error) {
-                log := ctrl.LoggerFrom(ctx, "namespace", sts.Namespace).WithName("StatefulSetDecomissioner.Filter")
-                idx := slices.IndexFunc(
-                    sts.OwnerReferences,
-                    func(ownerRef metav1.OwnerReference) bool {
-                        return ownerRef.APIVersion == vectorizedv1alpha1.GroupVersion.String() && ownerRef.Kind == "Cluster"
-                    })
-                if idx == -1 {
-                    return false, nil
-                }
-
-                var vectorizedCluster vectorizedv1alpha1.Cluster
-                if err := mgr.GetClient().Get(ctx, types.NamespacedName{
-                    Name:      sts.OwnerReferences[idx].Name,
-                    Namespace: sts.Namespace,
-                }, &vectorizedCluster); err != nil {
-                    return false, fmt.Errorf("could not get Cluster: %w", err)
-                }
-
-                managedAnnotationKey := vectorizedv1alpha1.GroupVersion.Group + "/managed"
-                if managed, exists := vectorizedCluster.Annotations[managedAnnotationKey]; exists && managed == "false" {
-                    log.V(1).Info("ignoring StatefulSet of unmanaged V1 Cluster", "sts", sts.Name, "namespace", sts.Namespace)
-                    return false, nil
-                }
-
-                // Do some "manual" checks, as ClusterlQuiescent condition is always false if a ghost broker causes unhealthy cluster
-                // (and we can therefore not use it to check if the cluster is synced otherwise)
-                if vectorizedCluster.Status.CurrentReplicas != vectorizedCluster.Status.Replicas {
-                    log.V(1).Info("replicas are not synced", "cluster", vectorizedCluster.Name, "namespace", vectorizedCluster.Namespace)
-                    return false, nil
-                }
-                if vectorizedCluster.Status.Restarting {
-                    log.V(1).Info("cluster is restarting", "cluster", vectorizedCluster.Name, "namespace", vectorizedCluster.Namespace)
-                    return false, nil
-                }
-
-                if vectorizedCluster.Status.ObservedGeneration != vectorizedCluster.Generation {
-                    log.V(1).Info("generation not synced", "cluster", vectorizedCluster.Name, "namespace", vectorizedCluster.Namespace, "generation", vectorizedCluster.Generation, "observedGeneration", vectorizedCluster.Status.ObservedGeneration)
-                    return false, nil
-                }
-
-                if vectorizedCluster.Status.DecommissioningNode != nil {
-                    log.V(1).Info("decommission in progress", "cluster", vectorizedCluster.Name, "namespace", vectorizedCluster.Namespace, "node", *vectorizedCluster.Status.DecommissioningNode)
-                    return false, nil
-                }
-
-                return true, nil
-            }),
+            decommissioning.WithDesiredReplicasFetcher(adapter.desiredReplicas),
+            decommissioning.WithFilter(adapter.filter),
         )
         if err := d.SetupWithManager(mgr); err != nil {
             setupLog.Error(err, "unable to create controller", "controller", "StatefulSetDecommissioner")
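The RBAC and namespace-scoping problems described in the commit message are addressed by how the sidecar is configured rather than by new logic in this hunk. Purely as a rough sketch, assuming a recent controller-runtime release, this is the general shape of scoping a manager's cache to a single namespace and a label selector; the namespace and selector values are invented for illustration and are not taken from this commit:

```go
package main

import (
    "os"

    "k8s.io/apimachinery/pkg/labels"
    ctrl "sigs.k8s.io/controller-runtime"
    "sigs.k8s.io/controller-runtime/pkg/cache"
)

func main() {
    // Selector string is illustrative; in practice it would come from a flag
    // such as the chart-constructed label selector described in the commit message.
    selector, err := labels.Parse("app.kubernetes.io/name=redpanda")
    if err != nil {
        os.Exit(1)
    }

    mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
        Cache: cache.Options{
            // Watch only the namespace the sidecar is deployed into, so a
            // namespace-scoped Role/RoleBinding is sufficient.
            DefaultNamespaces: map[string]cache.Config{
                "redpanda": {},
            },
            // Cache only objects matching the selector.
            DefaultLabelSelector: selector,
        },
    })
    if err != nil {
        os.Exit(1)
    }

    _ = mgr // the decommissioner and PVC unbinder controllers would be registered here
}
```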

operator/cmd/run/vectorized.go

Lines changed: 156 additions & 0 deletions
@@ -0,0 +1,156 @@
+// Copyright 2025 Redpanda Data, Inc.
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.md
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0
+
+package run
+
+import (
+    "context"
+    "fmt"
+    "slices"
+
+    "github.com/cockroachdb/errors"
+    "github.com/redpanda-data/common-go/rpadmin"
+    appsv1 "k8s.io/api/apps/v1"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/types"
+    "k8s.io/utils/ptr"
+    ctrl "sigs.k8s.io/controller-runtime"
+    "sigs.k8s.io/controller-runtime/pkg/client"
+
+    vectorizedv1alpha1 "github.com/redpanda-data/redpanda-operator/operator/api/vectorized/v1alpha1"
+    internalclient "github.com/redpanda-data/redpanda-operator/operator/pkg/client"
+    pkglabels "github.com/redpanda-data/redpanda-operator/operator/pkg/labels"
+)
+
+// vectorizedDecommissionerAdapter is a helper struct that implements various methods
+// of mapping StatefulSets through Vectorized Clusters to arguments for the
+// StatefulSetDecommissioner.
+type vectorizedDecommissionerAdapter struct {
+    client  client.Client
+    factory internalclient.ClientFactory
+}
+
+func (b *vectorizedDecommissionerAdapter) desiredReplicas(ctx context.Context, sts *appsv1.StatefulSet) (int32, error) {
+    // Get Cluster CR, so we can then find its StatefulSets for a full count of desired replicas.
+    vectorizedCluster, err := b.getCluster(ctx, sts)
+    if err != nil {
+        return 0, err
+    }
+
+    if vectorizedCluster == nil {
+        return 0, nil
+    }
+
+    // We assume the cluster is fine and synced, checks have been performed in the filter already.
+
+    // Get all nodepool-sts for this Cluster
+    var stsList appsv1.StatefulSetList
+    if err := b.client.List(ctx, &stsList, &client.ListOptions{
+        LabelSelector: pkglabels.ForCluster(vectorizedCluster).AsClientSelector(),
+        Namespace:     vectorizedCluster.Namespace,
+    }); err != nil {
+        return 0, fmt.Errorf("failed to list statefulsets of Cluster: %w", err)
+    }
+
+    if len(stsList.Items) == 0 {
+        return 0, errors.New("found 0 StatefulSets for this Cluster")
+    }
+
+    var allReplicas int32
+    for _, sts := range stsList.Items {
+        allReplicas += ptr.Deref(sts.Spec.Replicas, 0)
+    }
+
+    // Should not happen, but if it actually happens, we don't want to run ghost broker decommissioner.
+    if allReplicas < 3 {
+        return 0, errors.Newf("found %d desiredReplicas, but want >= 3", allReplicas)
+    }
+
+    if allReplicas != vectorizedCluster.Status.CurrentReplicas || allReplicas != vectorizedCluster.Status.Replicas {
+        return 0, errors.Newf("replicas not synced. status.currentReplicas=%d,status.replicas=%d,allReplicas=%d", vectorizedCluster.Status.CurrentReplicas, vectorizedCluster.Status.Replicas, allReplicas)
+    }
+
+    return allReplicas, nil
+}
+
+func (b *vectorizedDecommissionerAdapter) filter(ctx context.Context, sts *appsv1.StatefulSet) (bool, error) {
+    log := ctrl.LoggerFrom(ctx, "namespace", sts.Namespace).WithName("StatefulSetDecomissioner.Filter")
+
+    vectorizedCluster, err := b.getCluster(ctx, sts)
+    if err != nil {
+        return false, err
+    }
+
+    if vectorizedCluster == nil {
+        return false, nil
+    }
+
+    managedAnnotationKey := vectorizedv1alpha1.GroupVersion.Group + "/managed"
+    if managed, exists := vectorizedCluster.Annotations[managedAnnotationKey]; exists && managed == "false" {
+        log.V(1).Info("ignoring StatefulSet of unmanaged V1 Cluster", "sts", sts.Name, "namespace", sts.Namespace)
+        return false, nil
+    }
+
+    // Do some "manual" checks, as ClusterlQuiescent condition is always false if a ghost broker causes unhealthy cluster
+    // (and we can therefore not use it to check if the cluster is synced otherwise)
+    if vectorizedCluster.Status.CurrentReplicas != vectorizedCluster.Status.Replicas {
+        log.V(1).Info("replicas are not synced", "cluster", vectorizedCluster.Name, "namespace", vectorizedCluster.Namespace)
+        return false, nil
+    }
+    if vectorizedCluster.Status.Restarting {
+        log.V(1).Info("cluster is restarting", "cluster", vectorizedCluster.Name, "namespace", vectorizedCluster.Namespace)
+        return false, nil
+    }
+
+    if vectorizedCluster.Status.ObservedGeneration != vectorizedCluster.Generation {
+        log.V(1).Info("generation not synced", "cluster", vectorizedCluster.Name, "namespace", vectorizedCluster.Namespace, "generation", vectorizedCluster.Generation, "observedGeneration", vectorizedCluster.Status.ObservedGeneration)
+        return false, nil
+    }
+
+    if vectorizedCluster.Status.DecommissioningNode != nil {
+        log.V(1).Info("decommission in progress", "cluster", vectorizedCluster.Name, "namespace", vectorizedCluster.Namespace, "node", *vectorizedCluster.Status.DecommissioningNode)
+        return false, nil
+    }
+
+    return true, nil
+}
+
+func (b *vectorizedDecommissionerAdapter) getAdminClient(ctx context.Context, sts *appsv1.StatefulSet) (*rpadmin.AdminAPI, error) {
+    cluster, err := b.getCluster(ctx, sts)
+    if err != nil {
+        return nil, err
+    }
+
+    if cluster == nil {
+        return nil, errors.Newf("failed to resolve %s/%s to vectorized cluster", sts.Namespace, sts.Name)
+    }
+
+    return b.factory.RedpandaAdminClient(ctx, cluster)
+}
+
+func (b *vectorizedDecommissionerAdapter) getCluster(ctx context.Context, sts *appsv1.StatefulSet) (*vectorizedv1alpha1.Cluster, error) {
+    idx := slices.IndexFunc(
+        sts.OwnerReferences,
+        func(ownerRef metav1.OwnerReference) bool {
+            return ownerRef.APIVersion == vectorizedv1alpha1.GroupVersion.String() && ownerRef.Kind == "Cluster"
+        })
+    if idx == -1 {
+        return nil, nil
+    }
+
+    var vectorizedCluster vectorizedv1alpha1.Cluster
+    if err := b.client.Get(ctx, types.NamespacedName{
+        Name:      sts.OwnerReferences[idx].Name,
+        Namespace: sts.Namespace,
+    }, &vectorizedCluster); err != nil {
+        return nil, errors.Wrap(err, "could not get Cluster")
+    }
+
+    return &vectorizedCluster, nil
+}
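The restored decommissioner tests mentioned in the commit message live elsewhere in this change set. As an illustration only, a unit test for the adapter's filter could look roughly like the sketch below; it assumes the vectorized v1alpha1 package exposes the usual generated AddToScheme helper and that the test sits in the same run package:

```go
package run

import (
    "context"
    "testing"

    "github.com/stretchr/testify/require"
    appsv1 "k8s.io/api/apps/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
    clientgoscheme "k8s.io/client-go/kubernetes/scheme"
    "sigs.k8s.io/controller-runtime/pkg/client/fake"

    vectorizedv1alpha1 "github.com/redpanda-data/redpanda-operator/operator/api/vectorized/v1alpha1"
)

// TestFilterSkipsUnmanagedCluster checks that the filter ignores StatefulSets
// whose owning Cluster carries the "<group>/managed=false" annotation.
func TestFilterSkipsUnmanagedCluster(t *testing.T) {
    scheme := runtime.NewScheme()
    require.NoError(t, clientgoscheme.AddToScheme(scheme))
    require.NoError(t, vectorizedv1alpha1.AddToScheme(scheme))

    cluster := &vectorizedv1alpha1.Cluster{
        ObjectMeta: metav1.ObjectMeta{
            Name:      "rp",
            Namespace: "redpanda",
            Annotations: map[string]string{
                vectorizedv1alpha1.GroupVersion.Group + "/managed": "false",
            },
        },
    }

    sts := &appsv1.StatefulSet{
        ObjectMeta: metav1.ObjectMeta{
            Name:      "rp",
            Namespace: "redpanda",
            OwnerReferences: []metav1.OwnerReference{{
                APIVersion: vectorizedv1alpha1.GroupVersion.String(),
                Kind:       "Cluster",
                Name:       "rp",
            }},
        },
    }

    adapter := vectorizedDecommissionerAdapter{
        client: fake.NewClientBuilder().WithScheme(scheme).WithObjects(cluster).Build(),
    }

    keep, err := adapter.filter(context.Background(), sts)
    require.NoError(t, err)
    require.False(t, keep, "StatefulSets of unmanaged Clusters should be filtered out")
}
```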
