@@ -93,8 +93,9 @@ type NodeDrainTimeoutSpecInput struct {
9393// * Verify Node drains for control plane and MachineDeployment Machines are blocked by WaitCompleted Pods
9494// * Force deleting the WaitCompleted Pods
9595// * Verify Node drains for control plane and MachineDeployment Machines are blocked by PDBs
96- // * Set NodeDrainTimeout to 1s to unblock Node drain
96+ // * Delete the unevictable pod PDBs
9797// * Verify machine deletion is blocked by waiting for volume detachment (only if VerifyNodeVolumeDetach is enabled)
98+ // * Set NodeDrainTimeout and NodeVolumeDetachTimeout to 1s to unblock Node drain
9899// * Unblocks waiting for volume detachment (only if VerifyNodeVolumeDetach is enabled)
99100// * Verify scale down succeeded because Node drains were unblocked.
100101func NodeDrainTimeoutSpec (ctx context.Context , inputGetter func () NodeDrainTimeoutSpecInput ) {
@@ -526,26 +527,19 @@ func NodeDrainTimeoutSpec(ctx context.Context, inputGetter func() NodeDrainTimeo
526527 }, input .E2EConfig .GetIntervals (specName , "wait-machine-deleted" )... ).Should (Succeed ())
527528 }
528529
529- By ("Set NodeDrainTimeout to 1s to unblock Node drain" )
530- // Note: This also verifies that KCP & MachineDeployments are still propagating changes to NodeDrainTimeout down to
531- // Machines that already have a deletionTimestamp.
532- drainTimeout := & metav1.Duration {Duration : time .Duration (1 ) * time .Second }
533- modifyControlPlaneViaClusterAndWait (ctx , modifyControlPlaneViaClusterAndWaitInput {
534- ClusterProxy : input .BootstrapClusterProxy ,
535- Cluster : cluster ,
536- ModifyControlPlaneTopology : func (topology * clusterv1.ControlPlaneTopology ) {
537- topology .NodeDrainTimeout = drainTimeout
538- },
539- WaitForControlPlane : input .E2EConfig .GetIntervals (specName , "wait-control-plane" ),
540- })
541- modifyMachineDeploymentViaClusterAndWait (ctx , modifyMachineDeploymentViaClusterAndWaitInput {
542- ClusterProxy : input .BootstrapClusterProxy ,
543- Cluster : cluster ,
544- ModifyMachineDeploymentTopology : func (topology * clusterv1.MachineDeploymentTopology ) {
545- topology .NodeDrainTimeout = drainTimeout
546- },
547- WaitForMachineDeployments : input .E2EConfig .GetIntervals (specName , "wait-worker-nodes" ),
530+ By ("Delete PDB for all unevictable pods to let drain succeed" )
531+ framework .DeletePodDisruptionBudget (ctx , framework.DeletePodDisruptionBudgetInput {
532+ ClientSet : workloadClusterProxy .GetClientSet (),
533+ Budget : cpDeploymentWithPDBName (),
534+ Namespace : "unevictable-workload" ,
548535 })
536+ for _ , md := range machineDeployments {
537+ framework .DeletePodDisruptionBudget (ctx , framework.DeletePodDisruptionBudgetInput {
538+ ClientSet : workloadClusterProxy .GetClientSet (),
539+ Budget : mdDeploymentWithPDBName (md .Name ),
540+ Namespace : "unevictable-workload" ,
541+ })
542+ }
549543
550544 if input .VerifyNodeVolumeDetach {
551545 By ("Verify Node removal for control plane and MachineDeployment Machines are blocked (only by volume detachments)" )
@@ -575,6 +569,21 @@ func NodeDrainTimeoutSpec(ctx context.Context, inputGetter func() NodeDrainTimeo
575569 input .UnblockNodeVolumeDetachment (ctx , input .BootstrapClusterProxy , cluster )
576570 }
577571
572+ // Set NodeDrainTimeout and NodeVolumeDetachTimeout to let the second ControlPlane Node get deleted without requiring manual intervention.
573+ By ("Set NodeDrainTimeout and NodeVolumeDetachTimeout for ControlPlanes to 1s to unblock Node drain" )
574+ // Note: This also verifies that KCP is still propagating changes to NodeDrainTimeout and
575+ // NodeVolumeDetachTimeout down to Machines that already have a deletionTimestamp.
576+ drainTimeout := & metav1.Duration {Duration : time .Duration (1 ) * time .Second }
577+ modifyControlPlaneViaClusterAndWait (ctx , modifyControlPlaneViaClusterAndWaitInput {
578+ ClusterProxy : input .BootstrapClusterProxy ,
579+ Cluster : cluster ,
580+ ModifyControlPlaneTopology : func (topology * clusterv1.ControlPlaneTopology ) {
581+ topology .NodeDrainTimeout = drainTimeout
582+ topology .NodeVolumeDetachTimeout = drainTimeout
583+ },
584+ WaitForControlPlane : input .E2EConfig .GetIntervals (specName , "wait-control-plane" ),
585+ })
586+
578587 By ("Verify scale down succeeded because Node drains and Volume detachments were unblocked" )
579588 // When we scale down the KCP, controlplane machines are deleted one by one, so it requires more time
580589 // MD Machine deletion is done in parallel and will be faster.
0 commit comments