diff --git a/controllers/clustercache/cluster_cache.go b/controllers/clustercache/cluster_cache.go
index 06d64f39863f..c141fa392519 100644
--- a/controllers/clustercache/cluster_cache.go
+++ b/controllers/clustercache/cluster_cache.go
@@ -60,6 +60,11 @@ type Options struct {
 	// will never be created.
 	WatchFilterValue string
 
+	// ClusterFilter is a function that can be used to filter which clusters should be handled
+	// by the ClusterCache. If nil, all clusters will be handled. If set, only clusters for which
+	// the filter returns true will be handled.
+	ClusterFilter ClusterFilter
+
 	// Cache are the cache options for the caches that are created per cluster.
 	Cache CacheOptions
 
@@ -67,6 +72,10 @@ type Options struct {
 	Client ClientOptions
 }
 
+// ClusterFilter is a function that filters which clusters should be handled by the ClusterCache.
+// It returns true if the cluster should be handled, false otherwise.
+type ClusterFilter func(cluster *clusterv1.Cluster) bool
+
 // CacheOptions are the cache options for the caches that are created per cluster.
 type CacheOptions struct {
 	// SyncPeriod is the sync period of the cache.
@@ -357,6 +366,11 @@ type clusterCache struct {
 
 	// cacheCtxCancel is used during Shutdown to stop caches.
 	cacheCtxCancel context.CancelCauseFunc
+
+	// clusterFilter is a function that can be used to filter which clusters should be handled
+	// by the ClusterCache. If nil, all clusters will be handled. If set, only clusters for which
+	// the filter returns true will be handled.
+	clusterFilter ClusterFilter
 }
 
 // clusterSource stores the necessary information so we can enqueue reconcile.Requests for reconcilers that
@@ -451,6 +465,15 @@ func (cc *clusterCache) Reconcile(ctx context.Context, req reconcile.Request) (r
 		return ctrl.Result{RequeueAfter: defaultRequeueAfter}, nil
 	}
 
+	// Apply cluster filter if set.
+	if cc.clusterFilter != nil && !cc.clusterFilter(cluster) {
+		log.V(6).Info("Cluster filtered out by ClusterFilter, not connecting")
+		accessor.Disconnect(ctx)
+		cc.deleteClusterAccessor(clusterKey)
+		cc.cleanupClusterSourcesForCluster(clusterKey)
+		return ctrl.Result{}, nil
+	}
+
 	// Return if infrastructure is not ready yet to avoid trying to open a connection when it cannot succeed.
 	// Requeue is not needed as there will be a new reconcile.Request when Cluster.status.initialization.infrastructureProvisioned is set.
 	if !ptr.Deref(cluster.Status.Initialization.InfrastructureProvisioned, false) {
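For context, here is a minimal sketch of how a consumer might opt in to the new field. Only `Options.WatchFilterValue`, `Options.ClusterFilter`, and the `ClusterFilter` type come from this diff; the import paths, the `example.com/opt-in` label key, and the `newClusterCacheOptions` helper are illustrative assumptions, and the surrounding manager/SetupWithManager wiring is omitted.

```go
// Sketch only: constructing clustercache.Options with the new ClusterFilter.
package example

import (
	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" // import path assumed
	"sigs.k8s.io/cluster-api/controllers/clustercache"
)

// newClusterCacheOptions is a hypothetical helper, not part of this change.
func newClusterCacheOptions(watchFilterValue string) clustercache.Options {
	return clustercache.Options{
		WatchFilterValue: watchFilterValue,
		// Only handle clusters carrying the (hypothetical) opt-in label.
		// Per the Reconcile change above, clusters for which this returns
		// false are skipped and, if already connected, disconnected and
		// cleaned up.
		ClusterFilter: func(cluster *clusterv1.Cluster) bool {
			return cluster.Labels["example.com/opt-in"] == "true"
		},
	}
}
```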
diff --git a/controllers/clustercache/cluster_cache_test.go b/controllers/clustercache/cluster_cache_test.go
index ce4b910eb2e1..18c0298b0d70 100644
--- a/controllers/clustercache/cluster_cache_test.go
+++ b/controllers/clustercache/cluster_cache_test.go
@@ -57,6 +57,9 @@ func TestReconcile(t *testing.T) {
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      "test-cluster",
 			Namespace: metav1.NamespaceDefault,
+			Labels: map[string]string{
+				"cluster.x-k8s.io/included-in-clustercache-tests": "true",
+			},
 		},
 		Spec: clusterv1.ClusterSpec{
 			ControlPlaneRef: clusterv1.ContractVersionedObjectReference{
@@ -87,6 +90,9 @@ func TestReconcile(t *testing.T) {
 		clusterAccessorConfig: accessorConfig,
 		clusterAccessors:      make(map[client.ObjectKey]*clusterAccessor),
 		cacheCtx:              context.Background(),
+		clusterFilter: func(cluster *clusterv1.Cluster) bool {
+			return cluster.ObjectMeta.Labels["cluster.x-k8s.io/included-in-clustercache-tests"] == "true"
+		},
 	}
 
 	// Add a Cluster source and start it (queue will be later used to verify the source works correctly)
@@ -110,6 +116,31 @@ func TestReconcile(t *testing.T) {
 	testCluster.Status.Initialization.InfrastructureProvisioned = ptr.To(true)
 	g.Expect(env.Status().Patch(ctx, testCluster, patch)).To(Succeed())
 
+	// Exclude the Cluster from the clustercache by changing the label.
+	patch = client.MergeFrom(testCluster.DeepCopy())
+	testCluster.ObjectMeta.Labels = map[string]string{
+		"cluster.x-k8s.io/included-in-clustercache-tests": "false",
+	}
+	g.Expect(env.Patch(ctx, testCluster, patch)).To(Succeed())
+	// Sanity check that the clusterFilter does not include the cluster now.
+	g.Expect(cc.clusterFilter(testCluster)).To(BeFalse())
+
+	// Reconcile, cluster should be ignored now
+	// => no requeue, no cluster accessor created.
+	res, err = cc.Reconcile(ctx, reconcile.Request{NamespacedName: clusterKey})
+	g.Expect(err).ToNot(HaveOccurred())
+	g.Expect(res).To(Equal(ctrl.Result{}))
+	g.Expect(res.IsZero()).To(BeTrue())
+
+	// Put the label back.
+	patch = client.MergeFrom(testCluster.DeepCopy())
+	testCluster.ObjectMeta.Labels = map[string]string{
+		"cluster.x-k8s.io/included-in-clustercache-tests": "true",
+	}
+	g.Expect(env.Patch(ctx, testCluster, patch)).To(Succeed())
+	// Sanity check that the clusterFilter does include the cluster now.
+	g.Expect(cc.clusterFilter(testCluster)).To(BeTrue())
+
 	// Reconcile, kubeconfig Secret doesn't exist
 	// => accessor.Connect will fail so we expect a retry with ConnectionCreationRetryInterval.
 	res, err = cc.Reconcile(ctx, reconcile.Request{NamespacedName: clusterKey})
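The test above exercises the filter with a hard-coded label check. As a follow-up illustration (not part of this diff), the same idea can be expressed with a label selector; `labels.Parse`, `labels.Set`, and `Selector.Matches` are from k8s.io/apimachinery, while `selectorClusterFilter` and the clusterv1 import path are assumptions.

```go
// Sketch only: building a ClusterFilter from a label selector, e.g. the
// "cluster.x-k8s.io/included-in-clustercache-tests=true" label used in the test.
package example

import (
	"k8s.io/apimachinery/pkg/labels"

	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" // import path assumed
	"sigs.k8s.io/cluster-api/controllers/clustercache"
)

// selectorClusterFilter is a hypothetical helper, not part of this change.
func selectorClusterFilter(selector labels.Selector) clustercache.ClusterFilter {
	return func(cluster *clusterv1.Cluster) bool {
		return selector.Matches(labels.Set(cluster.Labels))
	}
}

// exampleFilter shows how such a filter could be constructed.
func exampleFilter() (clustercache.ClusterFilter, error) {
	selector, err := labels.Parse("cluster.x-k8s.io/included-in-clustercache-tests=true")
	if err != nil {
		return nil, err
	}
	return selectorClusterFilter(selector), nil
}
```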