Skip to content

Commit 377c408

Browse files
committed
refactor: use cached client tracker in the provider
Instead of creating a workload cluster Kubernetes client on every call and closing it afterwards, use the standard CAPI ClusterCacheTracker to cache and reuse the client. Signed-off-by: Artem Chernyshev <[email protected]>
1 parent 7716403 commit 377c408

File tree

4 files changed

+49
-58
lines changed

4 files changed

+49
-58
lines changed

controllers/configs.go

Lines changed: 4 additions & 39 deletions
Original file line numberDiff line numberDiff line change
@@ -17,10 +17,8 @@ import (
1717
talosconfig "github.com/talos-systems/talos/pkg/machinery/client/config"
1818
corev1 "k8s.io/api/core/v1"
1919
v1 "k8s.io/api/core/v1"
20-
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
2120
"k8s.io/apimachinery/pkg/types"
2221
"k8s.io/client-go/kubernetes"
23-
"k8s.io/client-go/tools/clientcmd"
2422
"k8s.io/client-go/util/connrotation"
2523
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
2624
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -43,41 +41,6 @@ func newDialer() *connrotation.Dialer {
4341
return connrotation.NewDialer((&net.Dialer{Timeout: 30 * time.Second, KeepAlive: 30 * time.Second}).DialContext)
4442
}
4543

46-
// kubeconfigForCluster will fetch a kubeconfig secret based on cluster name/namespace,
47-
// use it to create a clientset, and return it.
48-
func (r *TalosControlPlaneReconciler) kubeconfigForCluster(ctx context.Context, cluster client.ObjectKey) (*kubernetesClient, error) {
49-
kubeconfigSecret := &corev1.Secret{}
50-
51-
err := r.Client.Get(ctx,
52-
types.NamespacedName{
53-
Namespace: cluster.Namespace,
54-
Name: cluster.Name + "-kubeconfig",
55-
},
56-
kubeconfigSecret,
57-
)
58-
if err != nil {
59-
return nil, err
60-
}
61-
62-
config, err := clientcmd.RESTConfigFromKubeConfig(kubeconfigSecret.Data["value"])
63-
if err != nil {
64-
return nil, err
65-
}
66-
67-
dialer := newDialer()
68-
config.Dial = dialer.DialContext
69-
70-
clientset, err := kubernetes.NewForConfig(config)
71-
if err != nil {
72-
return nil, err
73-
}
74-
75-
return &kubernetesClient{
76-
Clientset: clientset,
77-
dialer: dialer,
78-
}, nil
79-
}
80-
8144
// talosconfigForMachine will generate a talosconfig that uses *all* found addresses as the endpoints.
8245
func (r *TalosControlPlaneReconciler) talosconfigForMachines(ctx context.Context, tcp *controlplanev1.TalosControlPlane, machines ...clusterv1.Machine) (*talosclient.Client, error) {
8346
if len(machines) == 0 {
@@ -132,7 +95,7 @@ func (r *TalosControlPlaneReconciler) talosconfigFromWorkloadCluster(ctx context
13295
return nil, fmt.Errorf("at least one machine should be provided")
13396
}
13497

135-
clientset, err := r.kubeconfigForCluster(ctx, cluster)
98+
c, err := r.Tracker.GetClient(ctx, cluster)
13699
if err != nil {
137100
return nil, err
138101
}
@@ -146,8 +109,10 @@ func (r *TalosControlPlaneReconciler) talosconfigFromWorkloadCluster(ctx context
146109
return nil, fmt.Errorf("%q machine does not have a nodeRef", machine.Name)
147110
}
148111

112+
var node v1.Node
113+
149114
// grab all addresses as endpoints
150-
node, err := clientset.CoreV1().Nodes().Get(ctx, machine.Status.NodeRef.Name, metav1.GetOptions{})
115+
err := c.Get(ctx, types.NamespacedName{Name: machine.Status.NodeRef.Name, Namespace: machine.Status.NodeRef.Namespace}, &node)
151116
if err != nil {
152117
return nil, err
153118
}

controllers/etcd.go

Lines changed: 0 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -13,18 +13,10 @@ import (
1313
"github.com/talos-systems/talos/pkg/machinery/api/machine"
1414
talosclient "github.com/talos-systems/talos/pkg/machinery/client"
1515
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
16-
"sigs.k8s.io/cluster-api/util"
1716
"sigs.k8s.io/controller-runtime/pkg/client"
1817
)
1918

2019
func (r *TalosControlPlaneReconciler) etcdHealthcheck(ctx context.Context, tcp *controlplanev1.TalosControlPlane, cluster *clusterv1.Cluster, ownedMachines []clusterv1.Machine) error {
21-
kubeclient, err := r.kubeconfigForCluster(ctx, util.ObjectKey(cluster))
22-
if err != nil {
23-
return err
24-
}
25-
26-
defer kubeclient.Close() //nolint:errcheck
27-
2820
machines := []clusterv1.Machine{}
2921

3022
for _, machine := range ownedMachines {

controllers/taloscontrolplane_controller.go

Lines changed: 21 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -24,16 +24,19 @@ import (
2424
"google.golang.org/grpc/codes"
2525
"google.golang.org/grpc/status"
2626
corev1 "k8s.io/api/core/v1"
27+
v1 "k8s.io/api/core/v1"
2728
apierrors "k8s.io/apimachinery/pkg/api/errors"
2829
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
2930
"k8s.io/apimachinery/pkg/labels"
3031
"k8s.io/apimachinery/pkg/runtime"
3132
"k8s.io/apimachinery/pkg/selection"
33+
"k8s.io/apimachinery/pkg/types"
3234
kerrors "k8s.io/apimachinery/pkg/util/errors"
3335
"k8s.io/apiserver/pkg/storage/names"
3436
"k8s.io/utils/pointer"
3537
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
3638
"sigs.k8s.io/cluster-api/controllers/external"
39+
"sigs.k8s.io/cluster-api/controllers/remote"
3740
"sigs.k8s.io/cluster-api/util"
3841
"sigs.k8s.io/cluster-api/util/annotations"
3942
"sigs.k8s.io/cluster-api/util/conditions"
@@ -66,6 +69,7 @@ type TalosControlPlaneReconciler struct {
6669
APIReader client.Reader
6770
Log logr.Logger
6871
Scheme *runtime.Scheme
72+
Tracker *remote.ClusterCacheTracker
6973
}
7074

7175
func (r *TalosControlPlaneReconciler) SetupWithManager(mgr ctrl.Manager, options controller.Options) error {
@@ -309,19 +313,21 @@ func (r *TalosControlPlaneReconciler) scaleDownControlPlane(ctx context.Context,
309313

310314
r.Log.Info("Found control plane machines", "machines", len(machines))
311315

312-
kubeclient, err := r.kubeconfigForCluster(ctx, cluster)
316+
client, err := r.Tracker.GetClient(ctx, cluster)
313317
if err != nil {
314318
return ctrl.Result{RequeueAfter: 20 * time.Second}, err
315319
}
316320

317-
defer kubeclient.Close() //nolint:errcheck
318-
319321
deleteMachine := machines[0]
320322
for _, machine := range machines {
321323
if !machine.ObjectMeta.DeletionTimestamp.IsZero() {
322324
r.Log.Info("machine is in process of deletion", "machine", machine.Name)
323325

324-
node, err := kubeclient.CoreV1().Nodes().Get(ctx, machine.Status.NodeRef.Name, metav1.GetOptions{})
326+
var node v1.Node
327+
328+
name := types.NamespacedName{Name: machine.Status.NodeRef.Name, Namespace: machine.Status.NodeRef.Namespace}
329+
330+
err := client.Get(ctx, name, &node)
325331
if err != nil {
326332
// It's possible for the node to already be deleted in the workload cluster, so we just
327333
// requeue if that's that case instead of throwing a scary error.
@@ -333,7 +339,7 @@ func (r *TalosControlPlaneReconciler) scaleDownControlPlane(ctx context.Context,
333339

334340
r.Log.Info("Deleting node", "machine", machine.Name, "node", node.Name)
335341

336-
err = kubeclient.CoreV1().Nodes().Delete(ctx, node.Name, metav1.DeleteOptions{})
342+
err = client.Delete(ctx, &node)
337343
if err != nil {
338344
return ctrl.Result{RequeueAfter: 20 * time.Second}, err
339345
}
@@ -408,7 +414,11 @@ func (r *TalosControlPlaneReconciler) scaleDownControlPlane(ctx context.Context,
408414

409415
r.Log.Info("deleting node", "machine", deleteMachine.Name, "node", node.Name)
410416

411-
err = kubeclient.CoreV1().Nodes().Delete(ctx, node.Name, metav1.DeleteOptions{})
417+
n := &v1.Node{}
418+
n.Name = node.Name
419+
n.Namespace = node.Namespace
420+
421+
err = client.Delete(ctx, n)
412422
if err != nil {
413423
return ctrl.Result{RequeueAfter: 20 * time.Second}, err
414424
}
@@ -657,23 +667,23 @@ func (r *TalosControlPlaneReconciler) updateStatus(ctx context.Context, tcp *con
657667
return nil
658668
}
659669

660-
kubeclient, err := r.kubeconfigForCluster(ctx, util.ObjectKey(cluster))
670+
c, err := r.Tracker.GetClient(ctx, util.ObjectKey(cluster))
661671
if err != nil {
662672
r.Log.Info("failed to get kubeconfig for the cluster", "error", err)
663673

664674
return nil
665675
}
666676

667-
defer kubeclient.Close() //nolint:errcheck
668-
669677
nodeSelector := labels.NewSelector()
670678
req, err := labels.NewRequirement(constants.LabelNodeRoleMaster, selection.Exists, []string{})
671679
if err != nil {
672680
return err
673681
}
674682

675-
nodes, err := kubeclient.CoreV1().Nodes().List(ctx, metav1.ListOptions{
676-
LabelSelector: nodeSelector.Add(*req).String(),
683+
var nodes v1.NodeList
684+
685+
err = c.List(ctx, &nodes, &client.ListOptions{
686+
LabelSelector: nodeSelector.Add(*req),
677687
})
678688

679689
if err != nil {

main.go

Lines changed: 24 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,7 @@ limitations under the License.
1717
package main
1818

1919
import (
20+
"context"
2021
"flag"
2122
"math/rand"
2223
"os"
@@ -29,7 +30,9 @@ import (
2930
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
3031
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
3132
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
33+
"sigs.k8s.io/cluster-api/controllers/remote"
3234
ctrl "sigs.k8s.io/controller-runtime"
35+
"sigs.k8s.io/controller-runtime/pkg/client"
3336
"sigs.k8s.io/controller-runtime/pkg/controller"
3437
"sigs.k8s.io/controller-runtime/pkg/log/zap"
3538
// +kubebuilder:scaffold:imports
@@ -76,10 +79,31 @@ func main() {
7679
os.Exit(1)
7780
}
7881

82+
// Set up a ClusterCacheTracker to provide to controllers
83+
// requiring a connection to a remote cluster
84+
tracker, err := remote.NewClusterCacheTracker(mgr, remote.ClusterCacheTrackerOptions{
85+
Indexes: remote.DefaultIndexes,
86+
ClientUncachedObjects: []client.Object{},
87+
})
88+
if err != nil {
89+
setupLog.Error(err, "unable to create cluster cache tracker")
90+
os.Exit(1)
91+
}
92+
93+
if err := (&remote.ClusterCacheReconciler{
94+
Client: mgr.GetClient(),
95+
Log: ctrl.Log.WithName("remote").WithName("ClusterCacheReconciler"),
96+
Tracker: tracker,
97+
}).SetupWithManager(context.Background(), mgr, controller.Options{MaxConcurrentReconciles: 10}); err != nil {
98+
setupLog.Error(err, "unable to create controller", "controller", "ClusterCacheReconciler")
99+
os.Exit(1)
100+
}
101+
79102
if err = (&controllers.TalosControlPlaneReconciler{
80103
Client: mgr.GetClient(),
81104
APIReader: mgr.GetAPIReader(),
82105
Log: ctrl.Log.WithName("controllers").WithName("TalosControlPlane"),
106+
Tracker: tracker,
83107
Scheme: mgr.GetScheme(),
84108
}).SetupWithManager(mgr, controller.Options{MaxConcurrentReconciles: 10}); err != nil {
85109
setupLog.Error(err, "unable to create controller", "controller", "TalosControlPlane")

0 commit comments

Comments
 (0)