@@ -522,8 +522,8 @@ func (n *DefaultKonnectorAgent) waitForHelmUninstallCompletion(
522522 log logr.Logger ,
523523) error {
524524 // Create a context with timeout to avoid blocking cluster deletion indefinitely
525- // 90 seconds should be enough for most helm uninstalls while still being reasonable
526- waitCtx , cancel := context .WithTimeout (ctx , 90 * time .Second )
525+ // 30 seconds should be enough for most helm uninstalls while still being reasonable
526+ waitCtx , cancel := context .WithTimeout (ctx , 30 * time .Second )
527527 defer cancel ()
528528
529529 log .Info ("Monitoring HelmChartProxy deletion progress" , "name" , hcp .Name )
@@ -532,7 +532,7 @@ func (n *DefaultKonnectorAgent) waitForHelmUninstallCompletion(
532532 // This indicates CAAPH has acknowledged the deletion request
533533 err := wait .PollUntilContextTimeout (
534534 waitCtx ,
535- 2 * time .Second ,
535+ 3 * time .Second ,
536536 30 * time .Second ,
537537 true ,
538538 func (pollCtx context.Context ) (bool , error ) {
@@ -567,20 +567,5 @@ func (n *DefaultKonnectorAgent) waitForHelmUninstallCompletion(
567567 return fmt .Errorf ("error waiting for HelmChartProxy deletion: %w" , err )
568568 }
569569
570- // Additional wait to give CAAPH more time to complete the helm uninstall
571- // even after the HCP is deleted. This accounts for any cleanup operations.
572- log .Info ("HelmChartProxy deleted, allowing additional time for helm uninstall completion" )
573-
574- // Use a shorter additional wait to not delay cluster deletion too much
575- additionalWaitCtx , additionalCancel := context .WithTimeout (ctx , 30 * time .Second )
576- defer additionalCancel ()
577-
578- select {
579- case <- additionalWaitCtx .Done ():
580- log .Info ("Additional wait period completed, proceeding with cluster deletion" )
581- case <- time .After (10 * time .Second ):
582- log .Info ("Reasonable wait time elapsed, proceeding with cluster deletion" )
583- }
584-
585570 return nil
586571}