Skip to content
This repository was archived by the owner on Apr 25, 2023. It is now read-only.

Commit 3157e45

Browse files
committed
fix: ignore non-targeted clusters during deletion
During the deletion of resources, the controller errors if any cluster is marked as non-ready. That leads to objects being stuck during deletion while such clusters are present, even if the resource is not deployed in them. This commit makes the deletion reconciliation compute the placement of the federated resource and ignore all other clusters. This way, deletion fails only if a non-ready cluster is one of the clusters where the object is actually federated.
1 parent f5d5f20 commit 3157e45

File tree

1 file changed

+52
-8
lines changed

1 file changed

+52
-8
lines changed

pkg/controller/sync/controller.go

Lines changed: 52 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -268,7 +268,19 @@ func (s *KubeFedSyncController) reconcile(qualifiedName util.QualifiedName) util
268268
apiResource := s.typeConfig.GetTargetType()
269269
gvk := apiResourceToGVK(&apiResource)
270270
klog.V(2).Infof("Ensuring the removal of the label %q from %s %q in member clusters.", util.ManagedByKubeFedLabelKey, gvk.Kind, qualifiedName)
271-
err = s.removeManagedLabel(gvk, qualifiedName)
271+
// We can't compute resource placement, therefore we try to
272+
// remove it from all member clusters.
273+
clusters, err := s.informer.GetClusters()
274+
if err != nil {
275+
wrappedErr := errors.Wrap(err, "failed to get member clusters")
276+
runtime.HandleError(wrappedErr)
277+
return util.StatusError
278+
}
279+
clusterNames := sets.NewString()
280+
for _, cluster := range clusters {
281+
clusterNames = clusterNames.Insert(cluster.Name)
282+
}
283+
err = s.removeManagedLabel(gvk, qualifiedName, clusterNames)
272284
if err != nil {
273285
wrappedErr := errors.Wrapf(err, "failed to remove the label %q from %s %q in member clusters", util.ManagedByKubeFedLabelKey, gvk.Kind, qualifiedName)
274286
runtime.HandleError(wrappedErr)
@@ -501,7 +513,19 @@ func (s *KubeFedSyncController) ensureDeletion(fedResource FederatedResource) ut
501513
return util.StatusError
502514
}
503515
klog.V(2).Infof("Initiating the removal of the label %q from resources previously managed by %s %q.", util.ManagedByKubeFedLabelKey, kind, key)
504-
err = s.removeManagedLabel(fedResource.TargetGVK(), fedResource.TargetName())
516+
clusters, err := s.informer.GetClusters()
517+
if err != nil {
518+
wrappedErr := errors.Wrap(err, "failed to get member clusters")
519+
runtime.HandleError(wrappedErr)
520+
return util.StatusError
521+
}
522+
targetClusters, err := fedResource.ComputePlacement(clusters)
523+
if err != nil {
524+
wrappedErr := errors.Wrapf(err, "failed to compute placement for %s %q", kind, key)
525+
runtime.HandleError(wrappedErr)
526+
return util.StatusError
527+
}
528+
err = s.removeManagedLabel(fedResource.TargetGVK(), fedResource.TargetName(), targetClusters)
505529
if err != nil {
506530
wrappedErr := errors.Wrapf(err, "failed to remove the label %q from all resources previously managed by %s %q", util.ManagedByKubeFedLabelKey, kind, key)
507531
runtime.HandleError(wrappedErr)
@@ -533,8 +557,8 @@ func (s *KubeFedSyncController) ensureDeletion(fedResource FederatedResource) ut
533557

534558
// removeManagedLabel attempts to remove the managed label from
535559
// resources with the given name in member clusters.
536-
func (s *KubeFedSyncController) removeManagedLabel(gvk schema.GroupVersionKind, qualifiedName util.QualifiedName) error {
537-
ok, err := s.handleDeletionInClusters(gvk, qualifiedName, func(dispatcher dispatch.UnmanagedDispatcher, clusterName string, clusterObj *unstructured.Unstructured) {
560+
func (s *KubeFedSyncController) removeManagedLabel(gvk schema.GroupVersionKind, qualifiedName util.QualifiedName, clusters sets.String) error {
561+
ok, err := s.handleDeletionInClusters(gvk, qualifiedName, clusters, func(dispatcher dispatch.UnmanagedDispatcher, clusterName string, clusterObj *unstructured.Unstructured) {
538562
if clusterObj.GetDeletionTimestamp() != nil {
539563
return
540564
}
@@ -554,8 +578,17 @@ func (s *KubeFedSyncController) deleteFromClusters(fedResource FederatedResource
554578
gvk := fedResource.TargetGVK()
555579
qualifiedName := fedResource.TargetName()
556580

581+
clusters, err := s.informer.GetClusters()
582+
if err != nil {
583+
return false, err
584+
}
585+
targetClusters, err := fedResource.ComputePlacement(clusters)
586+
if err != nil {
587+
return false, err
588+
}
589+
557590
remainingClusters := []string{}
558-
ok, err := s.handleDeletionInClusters(gvk, qualifiedName, func(dispatcher dispatch.UnmanagedDispatcher, clusterName string, clusterObj *unstructured.Unstructured) {
591+
ok, err := s.handleDeletionInClusters(gvk, qualifiedName, targetClusters, func(dispatcher dispatch.UnmanagedDispatcher, clusterName string, clusterObj *unstructured.Unstructured) {
559592
// If the containing namespace of a FederatedNamespace is
560593
// marked for deletion, it is impossible to require the
561594
// removal of the namespace in advance of removal of the sync
@@ -615,9 +648,17 @@ func (s *KubeFedSyncController) ensureRemovedOrUnmanaged(fedResource FederatedRe
615648
return errors.Wrap(err, "failed to get a list of clusters")
616649
}
617650

651+
targetClusters, err := fedResource.ComputePlacement(clusters)
652+
if err != nil {
653+
return errors.Wrapf(err, "failed to compute placement for %s %q", fedResource.FederatedKind(), fedResource.FederatedName().Name)
654+
}
655+
618656
dispatcher := dispatch.NewCheckUnmanagedDispatcher(s.informer.GetClientForCluster, fedResource.TargetGVK(), fedResource.TargetName())
619657
unreadyClusters := []string{}
620658
for _, cluster := range clusters {
659+
if !targetClusters.Has(cluster.Name) {
660+
continue
661+
}
621662
if !util.IsClusterReady(&cluster.Status) {
622663
unreadyClusters = append(unreadyClusters, cluster.Name)
623664
continue
@@ -639,18 +680,21 @@ func (s *KubeFedSyncController) ensureRemovedOrUnmanaged(fedResource FederatedRe
639680

640681
// handleDeletionInClusters invokes the provided deletion handler for
641682
// each managed resource in member clusters.
642-
func (s *KubeFedSyncController) handleDeletionInClusters(gvk schema.GroupVersionKind, qualifiedName util.QualifiedName,
683+
func (s *KubeFedSyncController) handleDeletionInClusters(gvk schema.GroupVersionKind, qualifiedName util.QualifiedName, clusters sets.String,
643684
deletionFunc func(dispatcher dispatch.UnmanagedDispatcher, clusterName string, clusterObj *unstructured.Unstructured)) (bool, error) {
644-
clusters, err := s.informer.GetClusters()
685+
memberClusters, err := s.informer.GetClusters()
645686
if err != nil {
646687
return false, errors.Wrap(err, "failed to get a list of clusters")
647688
}
648689

649690
dispatcher := dispatch.NewUnmanagedDispatcher(s.informer.GetClientForCluster, gvk, qualifiedName)
650691
retrievalFailureClusters := []string{}
651692
unreadyClusters := []string{}
652-
for _, cluster := range clusters {
693+
for _, cluster := range memberClusters {
653694
clusterName := cluster.Name
695+
if !clusters.Has(clusterName) {
696+
continue
697+
}
654698

655699
if !util.IsClusterReady(&cluster.Status) {
656700
unreadyClusters = append(unreadyClusters, clusterName)

0 commit comments

Comments
 (0)