
Commit 07103da

authored by yangw
refactor: remove dynamic client usage and streamline Kubernetes client interactions (#1420)
* refactor: remove dynamic client usage and streamline Kubernetes client interactions

  - Eliminated the dynamic Kubernetes client from the codebase, simplifying client creation and usage across controllers and utility functions.
  - Updated the relevant functions and method signatures to use the standard Kubernetes client interface, enhancing code clarity and maintainability.
  - Adjusted controller logic to ensure proper handling of Redis cluster and sentinel operations without the dynamic client.

  This refactor improves the overall architecture and reduces complexity in client management.

  Signed-off-by: yangw <[email protected]>

* fix lint

  Signed-off-by: yangw <[email protected]>

---------

Signed-off-by: yangw <[email protected]>
1 parent 2389f9c commit 07103da
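The core trade in this commit: the dynamic client works through unstructured objects and explicit GroupVersionResource triples, while the controller-runtime client the operator already carries (mgr.GetClient()) resolves types through its scheme. Below is a minimal sketch of the two styles, assuming the operator's usual v1beta2 API group; the GVR and the "status.state" field path are illustrative, not taken from this diff.

```go
package sketch

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// Assumed GVR for illustration; the real group lives in the api/ packages.
var redisClusterGVR = schema.GroupVersionResource{
	Group:    "redis.redis.opstreelabs.in",
	Version:  "v1beta2",
	Resource: "redisclusters",
}

// Before: every status write round-trips through unstructured maps and a GVR.
func updateStatusDynamic(ctx context.Context, dc dynamic.Interface, ns, name, state string) error {
	u, err := dc.Resource(redisClusterGVR).Namespace(ns).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	// "status", "state" is a hypothetical field path for this sketch.
	if err := unstructured.SetNestedField(u.Object, state, "status", "state"); err != nil {
		return err
	}
	_, err = dc.Resource(redisClusterGVR).Namespace(ns).UpdateStatus(ctx, u, metav1.UpdateOptions{})
	return err
}

// After: the typed, scheme-aware client writes the status subresource directly.
func updateStatusTyped(ctx context.Context, c client.Client, cr client.Object) error {
	return c.Status().Update(ctx, cr)
}
```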

File tree

10 files changed: +40 −137 lines changed

internal/cmd/manager/cmd.go

Lines changed: 7 additions & 16 deletions
@@ -36,7 +36,6 @@ import (
 	"github.com/OT-CONTAINER-KIT/redis-operator/internal/monitoring"
 	coreWebhook "github.com/OT-CONTAINER-KIT/redis-operator/internal/webhook"
 	"github.com/spf13/cobra"
-	"k8s.io/client-go/dynamic"
 	"k8s.io/client-go/kubernetes"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/cache"
@@ -119,11 +118,11 @@ func runManager(opts *managerOptions) error {
 		setupLog.Error(err, "unable to start manager")
 		return err
 	}
-	k8sClient, dk8sClient, err := createK8sClients()
+	k8sClient, err := createK8sClient()
 	if err != nil {
 		return err
 	}
-	if err := setupControllers(mgr, k8sClient, dk8sClient, opts.maxConcurrentReconciles); err != nil {
+	if err := setupControllers(mgr, k8sClient, opts.maxConcurrentReconciles); err != nil {
 		return err
 	}
 	if opts.enableWebhooks {
@@ -180,27 +179,21 @@ func createControllerOptions(opts *managerOptions) ctrl.Options {
 	return options
 }
 
-// createK8sClients creates Kubernetes clients
-func createK8sClients() (kubernetes.Interface, dynamic.Interface, error) {
+// createK8sClient creates Kubernetes client
+func createK8sClient() (kubernetes.Interface, error) {
 	k8sConfig := k8sutils.GenerateK8sConfig()
 
 	k8sClient, err := k8sutils.GenerateK8sClient(k8sConfig)
 	if err != nil {
 		setupLog.Error(err, "unable to create k8s client")
-		return nil, nil, err
+		return nil, err
 	}
 
-	dk8sClient, err := k8sutils.GenerateK8sDynamicClient(k8sConfig)
-	if err != nil {
-		setupLog.Error(err, "unable to create k8s dynamic client")
-		return nil, nil, err
-	}
-
-	return k8sClient, dk8sClient, nil
+	return k8sClient, nil
 }
 
 // setupControllers sets up all controllers
-func setupControllers(mgr ctrl.Manager, k8sClient kubernetes.Interface, dk8sClient dynamic.Interface, maxConcurrentReconciles int) error {
+func setupControllers(mgr ctrl.Manager, k8sClient kubernetes.Interface, maxConcurrentReconciles int) error {
 	// Get max concurrent reconciles from environment
 	maxConcurrentReconciles = internalenv.GetMaxConcurrentReconciles(maxConcurrentReconciles)
 
@@ -216,7 +209,6 @@ func setupControllers(mgr ctrl.Manager, k8sClient kubernetes.Interface, dk8sClie
 	if err := (&redisclustercontroller.Reconciler{
 		Client:      mgr.GetClient(),
 		K8sClient:   k8sClient,
-		Dk8sClient:  dk8sClient,
 		Healer:      healer,
 		Recorder:    mgr.GetEventRecorderFor("rediscluster-controller"),
 		StatefulSet: k8sutils.NewStatefulSetService(k8sClient),
@@ -237,7 +229,6 @@ func setupControllers(mgr ctrl.Manager, k8sClient kubernetes.Interface, dk8sClie
 		Checker:            redis.NewChecker(k8sClient),
 		Healer:             healer,
 		K8sClient:          k8sClient,
-		Dk8sClient:         dk8sClient,
 		ReplicationWatcher: intctrlutil.NewResourceWatcher(),
 	}).SetupWithManager(mgr, controller.Options{MaxConcurrentReconciles: maxConcurrentReconciles}); err != nil {
 		setupLog.Error(err, "unable to create controller", "controller", "RedisSentinel")

internal/controller/rediscluster/rediscluster_controller.go

Lines changed: 8 additions & 10 deletions
@@ -31,7 +31,6 @@ import (
 	retry "github.com/avast/retry-go"
 	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
-	"k8s.io/client-go/dynamic"
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/tools/record"
 	ctrl "sigs.k8s.io/controller-runtime"
@@ -44,10 +43,9 @@ import (
 type Reconciler struct {
 	client.Client
 	k8sutils.StatefulSet
-	Healer     redis.Healer
-	K8sClient  kubernetes.Interface
-	Dk8sClient dynamic.Interface
-	Recorder   record.EventRecorder
+	Healer    redis.Healer
+	K8sClient kubernetes.Interface
+	Recorder  record.EventRecorder
 }
 
 func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
@@ -128,7 +126,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu
 	// Mark the cluster status as initializing if there are no leader or follower nodes
 	if (instance.Status.ReadyLeaderReplicas == 0 && instance.Status.ReadyFollowerReplicas == 0) ||
 		instance.Status.ReadyLeaderReplicas != leaderReplicas {
-		err = k8sutils.UpdateRedisClusterStatus(ctx, instance, rcvb2.RedisClusterInitializing, rcvb2.InitializingClusterLeaderReason, instance.Status.ReadyLeaderReplicas, instance.Status.ReadyFollowerReplicas, r.Dk8sClient)
+		err = k8sutils.UpdateRedisClusterStatus(ctx, instance, rcvb2.RedisClusterInitializing, rcvb2.InitializingClusterLeaderReason, instance.Status.ReadyLeaderReplicas, instance.Status.ReadyFollowerReplicas, r.Client)
 		if err != nil {
 			return intctrlutil.RequeueE(ctx, err, "")
 		}
@@ -154,7 +152,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu
 	// Mark the cluster status as initializing if there are no follower nodes
 	if (instance.Status.ReadyLeaderReplicas == 0 && instance.Status.ReadyFollowerReplicas == 0) ||
 		instance.Status.ReadyFollowerReplicas != followerReplicas {
-		err = k8sutils.UpdateRedisClusterStatus(ctx, instance, rcvb2.RedisClusterInitializing, rcvb2.InitializingClusterFollowerReason, leaderReplicas, instance.Status.ReadyFollowerReplicas, r.Dk8sClient)
+		err = k8sutils.UpdateRedisClusterStatus(ctx, instance, rcvb2.RedisClusterInitializing, rcvb2.InitializingClusterFollowerReason, leaderReplicas, instance.Status.ReadyFollowerReplicas, r.Client)
 		if err != nil {
 			return intctrlutil.RequeueE(ctx, err, "")
 		}
@@ -182,7 +180,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu
 
 	// Mark the cluster status as bootstrapping if all the leader and follower nodes are ready
 	if !(instance.Status.ReadyLeaderReplicas == leaderReplicas && instance.Status.ReadyFollowerReplicas == followerReplicas) {
-		err = k8sutils.UpdateRedisClusterStatus(ctx, instance, rcvb2.RedisClusterBootstrap, rcvb2.BootstrapClusterReason, leaderReplicas, followerReplicas, r.Dk8sClient)
+		err = k8sutils.UpdateRedisClusterStatus(ctx, instance, rcvb2.RedisClusterBootstrap, rcvb2.BootstrapClusterReason, leaderReplicas, followerReplicas, r.Client)
 		if err != nil {
 			return intctrlutil.RequeueE(ctx, err, "")
 		}
@@ -222,7 +220,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu
 		logger.Error(err, "failed to determine unhealthy node count in cluster")
 	}
 	if int(totalReplicas) > 1 && unhealthyNodeCount >= int(totalReplicas)-1 {
-		err = k8sutils.UpdateRedisClusterStatus(ctx, instance, rcvb2.RedisClusterFailed, "RedisCluster has too many unhealthy nodes", leaderReplicas, followerReplicas, r.Dk8sClient)
+		err = k8sutils.UpdateRedisClusterStatus(ctx, instance, rcvb2.RedisClusterFailed, "RedisCluster has too many unhealthy nodes", leaderReplicas, followerReplicas, r.Client)
 		if err != nil {
 			return intctrlutil.RequeueE(ctx, err, "")
 		}
@@ -269,7 +267,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu
 		return intctrlutil.RequeueE(ctx, err, "failed to set dynamic config")
 	}
 
-	err = k8sutils.UpdateRedisClusterStatus(ctx, instance, rcvb2.RedisClusterReady, rcvb2.ReadyClusterReason, leaderReplicas, followerReplicas, r.Dk8sClient)
+	err = k8sutils.UpdateRedisClusterStatus(ctx, instance, rcvb2.RedisClusterReady, rcvb2.ReadyClusterReason, leaderReplicas, followerReplicas, r.Client)
 	if err != nil {
 		return intctrlutil.RequeueE(ctx, err, "")
 	}
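All five status-update call sites above now receive r.Client, the manager's scheme-aware client, where r.Dk8sClient used to go. The body of k8sutils.UpdateRedisClusterStatus is not part of this commit's visible diff; the sketch below is a plausible post-refactor shape only. The signature is pinned by the call sites, but the import path for rcvb2, the RedisClusterState type name, and the Status fields State and Reason are assumptions (ReadyLeaderReplicas and ReadyFollowerReplicas do appear in the reconciler above).

```go
package k8sutils

import (
	"context"

	// Path inferred from the sibling api/redisreplication and api/redissentinel imports.
	rcvb2 "github.com/OT-CONTAINER-KIT/redis-operator/api/rediscluster/v1beta2"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// UpdateRedisClusterStatus: hypothetical sketch of the helper after the refactor.
func UpdateRedisClusterStatus(ctx context.Context, cr *rcvb2.RedisCluster, state rcvb2.RedisClusterState, reason string, readyLeaders, readyFollowers int32, c client.Client) error {
	cr.Status.State = state   // field names State and Reason are assumed
	cr.Status.Reason = reason
	cr.Status.ReadyLeaderReplicas = readyLeaders
	cr.Status.ReadyFollowerReplicas = readyFollowers
	// Status().Update targets the status subresource, so a stale spec on cr
	// cannot clobber concurrent spec changes.
	return c.Status().Update(ctx, cr)
}
```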

internal/controller/rediscluster/rediscluster_controller_suite_test.go

Lines changed: 0 additions & 5 deletions
@@ -28,7 +28,6 @@ import (
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
 	"github.com/onsi/gomega/gexec"
-	"k8s.io/client-go/dynamic"
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/kubernetes/scheme"
 	ctrl "sigs.k8s.io/controller-runtime"
@@ -96,13 +95,9 @@ var _ = BeforeSuite(func() {
 	k8sClient, err := kubernetes.NewForConfig(cfg)
 	Expect(err).ToNot(HaveOccurred())
 
-	dk8sClient, err := dynamic.NewForConfig(cfg)
-	Expect(err).ToNot(HaveOccurred())
-
 	err = (&Reconciler{
 		Client:      k8sManager.GetClient(),
 		K8sClient:   k8sClient,
-		Dk8sClient:  dk8sClient,
 		Healer:      redis.NewHealer(k8sClient),
 		Recorder:    k8sManager.GetEventRecorderFor("rediscluster-controller"),
 		StatefulSet: k8sutils.NewStatefulSetService(k8sClient),

internal/controller/redissentinel/redissentinel_controller.go

Lines changed: 2 additions & 4 deletions
@@ -12,7 +12,6 @@ import (
 	intctrlutil "github.com/OT-CONTAINER-KIT/redis-operator/internal/controllerutil"
 	"github.com/OT-CONTAINER-KIT/redis-operator/internal/k8sutils"
 	"k8s.io/apimachinery/pkg/types"
-	"k8s.io/client-go/dynamic"
 	"k8s.io/client-go/kubernetes"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
@@ -25,7 +24,6 @@ type RedisSentinelReconciler struct {
 	Checker            redis.Checker
 	Healer             redis.Healer
 	K8sClient          kubernetes.Interface
-	Dk8sClient         dynamic.Interface
 	ReplicationWatcher *intctrlutil.ResourceWatcher
 }
 
@@ -90,7 +88,7 @@ func (r *RedisSentinelReconciler) reconcileFinalizer(ctx context.Context, instan
 }
 
 func (r *RedisSentinelReconciler) reconcileReplication(ctx context.Context, instance *rsvb2.RedisSentinel) (ctrl.Result, error) {
-	if instance.Spec.RedisSentinelConfig != nil && !k8sutils.IsRedisReplicationReady(ctx, r.K8sClient, r.Dk8sClient, instance) {
+	if instance.Spec.RedisSentinelConfig != nil && !k8sutils.IsRedisReplicationReady(ctx, r.K8sClient, r.Client, instance) {
 		return intctrlutil.RequeueAfter(ctx, time.Second*10, "Redis Replication is specified but not ready")
 	}
 
@@ -111,7 +109,7 @@ func (r *RedisSentinelReconciler) reconcileReplication(ctx context.Context, inst
 }
 
 func (r *RedisSentinelReconciler) reconcileSentinel(ctx context.Context, instance *rsvb2.RedisSentinel) (ctrl.Result, error) {
-	if err := k8sutils.CreateRedisSentinel(ctx, r.K8sClient, instance, r.K8sClient, r.Dk8sClient); err != nil {
+	if err := k8sutils.CreateRedisSentinel(ctx, r.K8sClient, instance, r.K8sClient, r.Client); err != nil {
 		return intctrlutil.RequeueE(ctx, err, "")
 	}
 

internal/controller/redissentinel/redissentinel_controller_suite_test.go

Lines changed: 0 additions & 5 deletions
@@ -29,7 +29,6 @@ import (
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
 	"github.com/onsi/gomega/gexec"
-	"k8s.io/client-go/dynamic"
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/kubernetes/scheme"
 	ctrl "sigs.k8s.io/controller-runtime"
@@ -100,17 +99,13 @@ var _ = BeforeSuite(func() {
 	k8sClient, err := kubernetes.NewForConfig(cfg)
 	Expect(err).ToNot(HaveOccurred())
 
-	dk8sClient, err := dynamic.NewForConfig(cfg)
-	Expect(err).ToNot(HaveOccurred())
-
 	checker := redis.NewChecker(k8sClient)
 	healer := redis.NewHealer(k8sClient)
 	err = (&RedisSentinelReconciler{
 		Client:             k8sManager.GetClient(),
 		Checker:            checker,
 		Healer:             healer,
 		K8sClient:          k8sClient,
-		Dk8sClient:         dk8sClient,
 		ReplicationWatcher: intctrlutil.NewResourceWatcher(),
 	}).SetupWithManager(k8sManager, controller.Options{})
 	Expect(err).ToNot(HaveOccurred())

internal/k8sutils/client.go

Lines changed: 0 additions & 11 deletions
@@ -1,8 +1,6 @@
 package k8sutils
 
 import (
-	// custom "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
-	"k8s.io/client-go/dynamic"
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/rest"
 	"k8s.io/client-go/tools/clientcmd"
@@ -19,15 +17,6 @@ func GenerateK8sClient(configProvider K8sConfigProvider) (kubernetes.Interface,
 	return kubernetes.NewForConfig(config)
 }
 
-// GenerateK8sClient create Dynamic client for kubernetes
-func GenerateK8sDynamicClient(configProvider K8sConfigProvider) (dynamic.Interface, error) {
-	config, err := configProvider()
-	if err != nil {
-		return nil, err
-	}
-	return dynamic.NewForConfig(config)
-}
-
 // GenerateK8sConfig will load the kube config file
 func GenerateK8sConfig() K8sConfigProvider {
 	loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
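What survives in client.go is the provider pattern: GenerateK8sConfig returns a lazy K8sConfigProvider, and GenerateK8sClient is now the only constructor that consumes it. A self-contained sketch of that wiring, mirroring the signatures visible in this diff (the provider shape, func() (*rest.Config, error), matches the test table deleted below in client_test.go):

```go
package sketch

import (
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
)

// K8sConfigProvider mirrors the type in internal/k8sutils: a deferred source
// of *rest.Config, which keeps client constructors trivially testable.
type K8sConfigProvider func() (*rest.Config, error)

// generateK8sConfig loads kubeconfig lazily, like k8sutils.GenerateK8sConfig.
func generateK8sConfig() K8sConfigProvider {
	return func() (*rest.Config, error) {
		loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
		cc := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, &clientcmd.ConfigOverrides{})
		return cc.ClientConfig()
	}
}

// generateK8sClient is the shape of the one constructor that remains:
// resolve the config, then build the typed clientset.
func generateK8sClient(provider K8sConfigProvider) (kubernetes.Interface, error) {
	config, err := provider()
	if err != nil {
		return nil, err
	}
	return kubernetes.NewForConfig(config)
}
```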

internal/k8sutils/client_test.go

Lines changed: 0 additions & 31 deletions
@@ -46,34 +46,3 @@ func TestGenerateK8sClient(t *testing.T) {
 		})
 	}
 }
-
-func TestGenerateK8sDynamicClient(t *testing.T) {
-	tests := []struct {
-		name           string
-		configProvider func() (*rest.Config, error)
-		wantErr        bool
-	}{
-		{
-			name:           "valid config",
-			configProvider: mockK8sConfigProvider,
-			wantErr:        false,
-		},
-		{
-			name:           "invalid config",
-			configProvider: mockInvalidK8sConfigProvider,
-			wantErr:        true,
-		},
-	}
-
-	for _, tt := range tests {
-		t.Run(tt.name, func(t *testing.T) {
-			client, err := GenerateK8sDynamicClient(tt.configProvider)
-			if tt.wantErr {
-				assert.Error(t, err, "GenerateK8sDynamicClient() should return an error for invalid config")
-			} else {
-				assert.NoError(t, err, "GenerateK8sDynamicClient() should not return an error for valid config")
-				assert.NotNil(t, client, "expected a non-nil Kubernetes client")
-			}
-		})
-	}
-}

internal/k8sutils/redis-replication.go

Lines changed: 3 additions & 3 deletions
@@ -6,9 +6,9 @@ import (
 	rrvb2 "github.com/OT-CONTAINER-KIT/redis-operator/api/redisreplication/v1beta2"
 	rsvb2 "github.com/OT-CONTAINER-KIT/redis-operator/api/redissentinel/v1beta2"
 	"github.com/OT-CONTAINER-KIT/redis-operator/internal/util"
-	"k8s.io/client-go/dynamic"
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/utils/ptr"
+	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/log"
 )
 
@@ -221,7 +221,7 @@ func generateRedisReplicationInitContainerParams(cr *rrvb2.RedisReplication) ini
 	return initcontainerProp
 }
 
-func IsRedisReplicationReady(ctx context.Context, client kubernetes.Interface, dClient dynamic.Interface, rs *rsvb2.RedisSentinel) bool {
+func IsRedisReplicationReady(ctx context.Context, client kubernetes.Interface, ctrlClient client.Client, rs *rsvb2.RedisSentinel) bool {
 	// statefulset name the same as the redis replication name
 	sts, err := GetStatefulSet(ctx, client, rs.GetNamespace(), rs.Spec.RedisSentinelConfig.RedisReplicationName)
 	if err != nil {
@@ -239,7 +239,7 @@ func IsRedisReplicationReady(ctx context.Context, client kubernetes.Interface, d
 	// Enhanced check: When the pod is ready, it may not have been
 	// created as part of a replication cluster, so we should verify
 	// whether there is an actual master node.
-	if master := getRedisReplicationMasterIP(ctx, client, rs, dClient); master == "" {
+	if master := getRedisReplicationMasterIP(ctx, client, rs, ctrlClient); master == "" {
 		return false
 	}
 	return true
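IsRedisReplicationReady now threads a controller-runtime client.Client down to getRedisReplicationMasterIP, whose body is outside this diff. The practical gain is a typed Get against the RedisReplication CR instead of GVR-plus-unstructured plumbing. A hypothetical sketch of that lookup (the helper name fetchReplication is invented for illustration; the rrvb2 import is the one visible in the hunk above):

```go
package sketch

import (
	"context"

	rrvb2 "github.com/OT-CONTAINER-KIT/redis-operator/api/redisreplication/v1beta2"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// fetchReplication pulls the RedisReplication CR a sentinel points at.
// The controller-runtime client resolves the GVK from its scheme, so no
// GroupVersionResource bookkeeping is needed at the call site.
func fetchReplication(ctx context.Context, c client.Client, ns, name string) (*rrvb2.RedisReplication, error) {
	rr := &rrvb2.RedisReplication{}
	if err := c.Get(ctx, types.NamespacedName{Namespace: ns, Name: name}, rr); err != nil {
		return nil, err
	}
	return rr, nil
}
```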
