Skip to content
This repository was archived by the owner on Jul 18, 2025. It is now read-only.

Commit 7247e9f

Browse files
committed
Fix context loss for the controller when it is restarted or updated
1 parent 04e2241 commit 7247e9f

File tree

5 files changed

+70
-4
lines changed

5 files changed

+70
-4
lines changed

Makefile

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -230,7 +230,7 @@ CAPI_KIND_CLUSTER_NAME ?= capi-test
230230

231231
# It is set by Prow GIT_TAG, a git-based tag of the form vYYYYMMDD-hash, e.g., v20210120-v0.3.10-308-gc61521971
232232

233-
TAG ?= v0.2.10-preview
233+
TAG ?= v0.2.11-preview
234234
ARCH ?= $(shell go env GOARCH)
235235
ALL_ARCH = amd64 arm arm64
236236

config/default/manager_image_patch.yaml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7,5 +7,5 @@ spec:
77
template:
88
spec:
99
containers:
10-
- image: ghcr.io/patricklaabs/cluster-api-addon-provider-cdk8s/cluster-api-cdk8s-controller:v0.2.10-preview
10+
- image: ghcr.io/patricklaabs/cluster-api-addon-provider-cdk8s/cluster-api-cdk8s-controller:v0.2.11-preview
1111
name: manager

config/default/manager_image_patch.yaml-e

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7,5 +7,5 @@ spec:
77
template:
88
spec:
99
containers:
10-
- image: ghcr.io/patricklaabs/cluster-api-addon-provider-cdk8s/cluster-api-cdk8s-controller:v0.2.10-preview
10+
- image: ghcr.io/patricklaabs/cluster-api-addon-provider-cdk8s/cluster-api-cdk8s-controller:v0.2.11-preview
1111
name: manager

config/default/manager_pull_policy.yaml-e

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -8,4 +8,4 @@ spec:
88
spec:
99
containers:
1010
- name: manager
11-
imagePullPolicy: IfNotPresent
11+
imagePullPolicy: Always

controllers/cdk8sappproxy/cdk8sappproxy_reconciler.go

Lines changed: 66 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -561,6 +561,12 @@ func (r *Reconciler) checkGitOrAnnotationTriggers(cdk8sAppProxy *addonsv1alpha1.
561561

562562
func (r *Reconciler) handleSkipApply(ctx context.Context, cdk8sAppProxy *addonsv1alpha1.Cdk8sAppProxy, currentCommitHash string, logger logr.Logger) error {
563563
logger.Info("Skipping resource application: no Git changes, no deletion annotation, and all resources verified present.")
564+
565+
// Re-establish watches for existing resources after controller restart
566+
if err := r.reestablishWatchesForExistingResources(ctx, cdk8sAppProxy, logger); err != nil {
567+
logger.Error(err, "Failed to re-establish watches for existing resources")
568+
}
569+
564570
cdk8sAppProxy.Status.ObservedGeneration = cdk8sAppProxy.Generation
565571
conditions.MarkTrue(cdk8sAppProxy, addonsv1alpha1.DeploymentProgressingCondition)
566572

@@ -578,6 +584,66 @@ func (r *Reconciler) handleSkipApply(ctx context.Context, cdk8sAppProxy *addonsv
578584
return nil
579585
}
580586

587+
func (r *Reconciler) reestablishWatchesForExistingResources(ctx context.Context, cdk8sAppProxy *addonsv1alpha1.Cdk8sAppProxy, logger logr.Logger) error {
588+
// Get the source and parse resources to know what should be watched
589+
appSourcePath, _, cleanup, err := r.prepareSource(ctx, cdk8sAppProxy, types.NamespacedName{Name: cdk8sAppProxy.Name, Namespace: cdk8sAppProxy.Namespace}, logger)
590+
if err != nil {
591+
return err
592+
}
593+
defer cleanup()
594+
595+
parsedResources, err := r.synthesizeAndParseResources(appSourcePath, logger)
596+
if err != nil {
597+
return err
598+
}
599+
600+
// Get target clusters
601+
selector, err := metav1.LabelSelectorAsSelector(&cdk8sAppProxy.Spec.ClusterSelector)
602+
if err != nil {
603+
return err
604+
}
605+
606+
var clusterList clusterv1.ClusterList
607+
if err := r.List(ctx, &clusterList, client.MatchingLabelsSelector{Selector: selector}); err != nil {
608+
return err
609+
}
610+
611+
proxyNamespacedName := types.NamespacedName{Name: cdk8sAppProxy.Name, Namespace: cdk8sAppProxy.Namespace}
612+
613+
// Re-establish watches for each resource on each cluster
614+
for _, cluster := range clusterList.Items {
615+
dynamicClient, err := r.getDynamicClientForCluster(ctx, cluster.Namespace, cluster.Name)
616+
if err != nil {
617+
logger.Error(err, "Failed to get dynamic client for watch re-establishment", "cluster", cluster.Name)
618+
619+
continue
620+
}
621+
622+
for _, resource := range parsedResources {
623+
gvk := resource.GroupVersionKind()
624+
watchKey := string(cluster.GetUID()) + "/" + resource.GetNamespace() + "/" + resource.GetName() + "/" + gvk.String()
625+
626+
// Check if watch already exists using ActiveWatches map
627+
if r.ActiveWatches != nil && r.ActiveWatches[proxyNamespacedName] != nil {
628+
if _, exists := r.ActiveWatches[proxyNamespacedName][watchKey]; exists {
629+
logger.Info("Watch already exists, skipping re-establishment", "watchKey", watchKey, "cluster", cluster.Name)
630+
631+
continue
632+
}
633+
}
634+
635+
// Start the watch since it doesn't exist
636+
if err := r.startResourceWatch(ctx, dynamicClient, gvk, resource.GetNamespace(), resource.GetName(), proxyNamespacedName, watchKey); err != nil {
637+
logger.Error(err, "Failed to re-establish watch", "watchKey", watchKey, "cluster", cluster.Name)
638+
} else {
639+
logger.Info("Re-established watch for existing resource", "watchKey", watchKey, "cluster", cluster.Name)
640+
}
641+
}
642+
}
643+
644+
return nil
645+
}
646+
581647
//nolint:unparam // ctrl.Result is required for controller-runtime reconciler pattern
582648
func (r *Reconciler) applyResourcesToClusters(ctx context.Context, cdk8sAppProxy *addonsv1alpha1.Cdk8sAppProxy, parsedResources []*unstructured.Unstructured, clusterList clusterv1.ClusterList, currentCommitHash string, proxyNamespacedName types.NamespacedName, logger logr.Logger) (ctrl.Result, error) {
583649
logger.Info("Proceeding with application of resources to target clusters.")

0 commit comments

Comments
 (0)