
Commit 44af7d9

feat: updating timout configs
1 parent f80421b commit 44af7d9

2 files changed: +5 -76 lines


common/pkg/server/server.go

Lines changed: 3 additions & 4 deletions
@@ -126,10 +126,9 @@ func (s *Server) Start(ctx context.Context) error {

    if t, ok := h.(lifecycle.BeforeClusterDelete); ok {
        if err := webhookServer.AddExtensionHandler(runtimeserver.ExtensionHandler{
-           Hook:           runtimehooksv1.BeforeClusterDelete,
-           Name:           strings.ToLower(h.Name()) + "-bcd",
-           HandlerFunc:    t.BeforeClusterDelete,
-           TimeoutSeconds: intToInt32Ptr(30), // 30 seconds timeout
+           Hook:        runtimehooksv1.BeforeClusterDelete,
+           Name:        strings.ToLower(h.Name()) + "-bcd",
+           HandlerFunc: t.BeforeClusterDelete,
        }); err != nil {
            setupLog.Error(err, "error adding handler")
            return err
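
Note on the change above: with TimeoutSeconds dropped, the Runtime SDK's default per-handler timeout applies to the BeforeClusterDelete handler instead of the hard-coded 30 seconds. As a sketch only (it reuses h, t, webhookServer, and setupLog from the surrounding Start method and is not a standalone program), the registration now amounts to:

    handler := runtimeserver.ExtensionHandler{
        Hook:        runtimehooksv1.BeforeClusterDelete,
        Name:        strings.ToLower(h.Name()) + "-bcd",
        HandlerFunc: t.BeforeClusterDelete,
        // TimeoutSeconds (*int32) is intentionally left nil so the Runtime SDK
        // default applies; the handler itself is expected to return quickly and
        // rely on retries (see the handler.go hunks below).
    }
    if err := webhookServer.AddExtensionHandler(handler); err != nil {
        setupLog.Error(err, "error adding handler")
        return err
    }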

pkg/handlers/lifecycle/konnectoragent/handler.go

Lines changed: 2 additions & 72 deletions
@@ -9,14 +9,12 @@ import (
    "fmt"
    "strings"
    "text/template"
-   "time"

    "github.com/go-logr/logr"
    "github.com/spf13/pflag"
    corev1 "k8s.io/api/core/v1"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-   "k8s.io/apimachinery/pkg/util/wait"
    clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
    runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1"
    ctrl "sigs.k8s.io/controller-runtime"
@@ -361,7 +359,7 @@ func (n *DefaultKonnectorAgent) BeforeClusterDelete(
        return
    case cleanupStatusInProgress:
        log.Info("Konnector Agent cleanup in progress, requesting retry")
-       resp.SetStatus(runtimehooksv1.ResponseStatusSuccess)
+       resp.SetStatus(runtimehooksv1.ResponseStatusFailure)
        resp.SetRetryAfterSeconds(5) // Retry after 5 seconds
        return
    case cleanupStatusNotStarted:
@@ -379,7 +377,7 @@ func (n *DefaultKonnectorAgent) BeforeClusterDelete(

    // After initiating cleanup, request a retry to monitor completion
    log.Info("Konnector Agent cleanup initiated, will monitor progress")
-   resp.SetStatus(runtimehooksv1.ResponseStatusSuccess)
+   resp.SetStatus(runtimehooksv1.ResponseStatusFailure)
    resp.SetRetryAfterSeconds(5) // Quick retry to start monitoring
}
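
The two hunks above flip the in-progress and just-initiated paths from ResponseStatusSuccess to ResponseStatusFailure while keeping the 5-second RetryAfterSeconds hint, signalling that cleanup has not completed yet and should be re-checked. A small self-contained sketch of that response pattern (the import path comes from the import hunk above; requestRetry is an illustrative helper name, not part of this repository):

    package main

    import (
        "fmt"

        runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1"
    )

    // requestRetry marks a BeforeClusterDelete response as not yet satisfied and
    // records how soon the hook should be retried, mirroring the pattern used in
    // the handler instead of blocking inside a single call.
    func requestRetry(resp *runtimehooksv1.BeforeClusterDeleteResponse, afterSeconds int32) {
        resp.SetStatus(runtimehooksv1.ResponseStatusFailure)
        resp.SetRetryAfterSeconds(afterSeconds)
    }

    func main() {
        resp := &runtimehooksv1.BeforeClusterDeleteResponse{}
        requestRetry(resp, 5)
        fmt.Printf("%+v\n", resp)
    }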

@@ -432,18 +430,6 @@ func (n *DefaultKonnectorAgent) deleteHelmChartProxy(
        )
    }

-   // Wait for CAAPH to complete the helm uninstall before allowing cluster deletion to proceed
-   // This ensures graceful deletion order - helm uninstall completes before infrastructure teardown
-   log.Info("Waiting for helm uninstall to complete before proceeding with cluster deletion", "name", hcp.Name)
-
-   if err := n.waitForHelmUninstallCompletion(ctx, hcp, log); err != nil {
-       log.Error(err, "Helm uninstall did not complete gracefully, proceeding with cluster deletion", "name", hcp.Name)
-       // Don't return error here - we want cluster deletion to proceed even if helm uninstall times out
-       // The important thing is we gave it a reasonable chance to complete
-   } else {
-       log.Info("Helm uninstall completed successfully", "name", hcp.Name)
-   }
-
    return nil
}

@@ -486,59 +472,3 @@ func (n *DefaultKonnectorAgent) checkCleanupStatus(
    log.Info("HelmChartProxy exists, cleanup not started", "name", hcp.Name)
    return cleanupStatusNotStarted, nil
}
-
-// waitForHelmUninstallCompletion waits for CAAPH to complete the helm uninstall process
-// before allowing cluster deletion to proceed. This ensures graceful deletion order.
-func (n *DefaultKonnectorAgent) waitForHelmUninstallCompletion(
-   ctx context.Context,
-   hcp *caaphv1.HelmChartProxy,
-   log logr.Logger,
-) error {
-   // Create a context with timeout to avoid blocking cluster deletion indefinitely
-   // 30 seconds should be enough for most helm uninstalls while still being reasonable
-   waitCtx, cancel := context.WithTimeout(ctx, 25*time.Second)
-   defer cancel()
-
-   log.Info("Monitoring HelmChartProxy deletion progress", "name", hcp.Name)
-
-   // First wait for the HelmChartProxy to be fully processed for deletion
-   // This indicates CAAPH has acknowledged the deletion request
-   err := wait.PollUntilContextTimeout(
-       waitCtx,
-       3*time.Second,
-       22*time.Second,
-       true,
-       func(pollCtx context.Context) (bool, error) {
-           currentHCP := &caaphv1.HelmChartProxy{}
-           err := n.client.Get(pollCtx, ctrlclient.ObjectKeyFromObject(hcp), currentHCP)
-           if err != nil {
-               if apierrors.IsNotFound(err) {
-                   log.Info("HelmChartProxy has been deleted", "name", hcp.Name)
-                   return true, nil
-               }
-               // If we can't reach the API server, the cluster might be shutting down
-               // In this case, we should not block cluster deletion
-               log.Info("Error checking HelmChartProxy status, cluster may be shutting down", "error", err)
-               return true, nil
-           }
-
-           // Check if the HCP is in deletion phase
-           if currentHCP.DeletionTimestamp != nil {
-               log.Info("HelmChartProxy is being deleted, waiting for completion", "name", hcp.Name)
-               return false, nil
-           }
-
-           // If HCP still exists without deletion timestamp, something might be wrong
-           log.Info("HelmChartProxy still exists, waiting for deletion to start", "name", hcp.Name)
-           return false, nil
-       },
-   )
-   if err != nil {
-       if wait.Interrupted(err) {
-           return fmt.Errorf("timeout waiting for HelmChartProxy deletion to complete")
-       }
-       return fmt.Errorf("error waiting for HelmChartProxy deletion: %w", err)
-   }
-
-   return nil
-}
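
With the blocking waitForHelmUninstallCompletion removed, the handler no longer polls in-process: each hook invocation re-checks deletion progress (the checkCleanupStatus path above) and the response's RetryAfterSeconds drives the follow-up calls. A hedged sketch of a single non-blocking probe under those assumptions; the function and package names are illustrative, and the caaphv1 import path is assumed since handler.go's import for it is not shown in this diff:

    package konnectoragent // assumed from the directory name

    import (
        "context"

        apierrors "k8s.io/apimachinery/pkg/api/errors"
        ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"

        // Assumed CAAPH API package; the file's actual caaphv1 alias may point elsewhere.
        caaphv1 "sigs.k8s.io/cluster-api-addon-provider-helm/api/v1alpha1"
    )

    // probeHelmChartProxyDeletion performs one API read and reports where the
    // helm uninstall stands, leaving any waiting to the hook's retry mechanism.
    func probeHelmChartProxyDeletion(
        ctx context.Context,
        c ctrlclient.Client,
        hcp *caaphv1.HelmChartProxy,
    ) (deleted, inProgress bool, err error) {
        current := &caaphv1.HelmChartProxy{}
        if err := c.Get(ctx, ctrlclient.ObjectKeyFromObject(hcp), current); err != nil {
            if apierrors.IsNotFound(err) {
                // The HelmChartProxy is gone: CAAPH has finished uninstalling the chart.
                return true, false, nil
            }
            return false, false, err
        }
        // A deletion timestamp means CAAPH is still tearing the release down; the
        // caller reports "in progress" and sets RetryAfterSeconds on the response.
        return false, current.DeletionTimestamp != nil, nil
    }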
