Skip to content

Commit 94df29b

Browse files
authored
Merge pull request kubernetes#127464 from sanposhiho/trigger-nodedelete
fix(eventhandler): trigger Node/Delete event
2 parents 1137a6a + 421f87a commit 94df29b

File tree

2 files changed

+57
-3
lines changed

2 files changed

+57
-3
lines changed

pkg/scheduler/eventhandlers.go

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -134,6 +134,8 @@ func (sched *Scheduler) deleteNodeFromCache(obj interface{}) {
134134
return
135135
}
136136

137+
sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(logger, framework.NodeDelete, node, nil, nil)
138+
137139
logger.V(3).Info("Delete event for node", "node", klog.KObj(node))
138140
if err := sched.Cache.RemoveNode(logger, node); err != nil {
139141
logger.Error(err, "Scheduler cache RemoveNode failed")

test/integration/scheduler/queue_test.go

Lines changed: 55 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -685,7 +685,7 @@ func TestCoreResourceEnqueue(t *testing.T) {
685685
st.MakePod().Name("pod4").Label("key1", "val").SpreadConstraint(1, "zone", v1.DoNotSchedule, st.MakeLabelSelector().Exists("key1").Obj(), ptr.To(int32(2)), nil, nil, nil).Container("image").Obj(),
686686
},
687687
triggerFn: func(testCtx *testutils.TestContext) error {
688-
// Trigger an assigned Node add event.
688+
// Trigger a Node add event.
689689
// It should requeue pod3 only because this node only has node label, and doesn't have zone label that pod4's topologyspread requires.
690690
node := st.MakeNode().Name("fake-node3").Label("node", "fake-node").Obj()
691691
if _, err := testCtx.ClientSet.CoreV1().Nodes().Create(testCtx.Ctx, node, metav1.CreateOptions{}); err != nil {
@@ -712,7 +712,7 @@ func TestCoreResourceEnqueue(t *testing.T) {
712712
st.MakePod().Name("pod4").Label("key1", "val").SpreadConstraint(1, "zone", v1.DoNotSchedule, st.MakeLabelSelector().Exists("key1").Obj(), ptr.To(int32(3)), nil, nil, nil).Container("image").Obj(),
713713
},
714714
triggerFn: func(testCtx *testutils.TestContext) error {
715-
// Trigger an assigned Node update event.
715+
// Trigger a Node update event.
716716
// It should requeue pod4 only because this node only has zone label, and doesn't have node label that pod3 requires.
717717
node := st.MakeNode().Name("fake-node2").Label("zone", "fake-node").Obj()
718718
if _, err := testCtx.ClientSet.CoreV1().Nodes().Update(testCtx.Ctx, node, metav1.UpdateOptions{}); err != nil {
@@ -723,6 +723,58 @@ func TestCoreResourceEnqueue(t *testing.T) {
723723
wantRequeuedPods: sets.New("pod4"),
724724
enableSchedulingQueueHint: []bool{true},
725725
},
726+
{
727+
name: "Pods with PodTopologySpread should be requeued when a Node with a topology label is deleted (QHint: enabled)",
728+
initialNodes: []*v1.Node{
729+
st.MakeNode().Name("fake-node1").Label("node", "fake-node").Obj(),
730+
st.MakeNode().Name("fake-node2").Label("zone", "fake-node").Obj(),
731+
},
732+
initialPods: []*v1.Pod{
733+
st.MakePod().Name("pod1").Label("key1", "val").SpreadConstraint(1, "node", v1.DoNotSchedule, st.MakeLabelSelector().Exists("key1").Obj(), nil, nil, nil, nil).Container("image").Node("fake-node1").Obj(),
734+
st.MakePod().Name("pod2").Label("key1", "val").SpreadConstraint(1, "zone", v1.DoNotSchedule, st.MakeLabelSelector().Exists("key1").Obj(), nil, nil, nil, nil).Container("image").Node("fake-node2").Obj(),
735+
},
736+
pods: []*v1.Pod{
737+
// - Pod3 and Pod4 will be rejected by the PodTopologySpread plugin.
738+
st.MakePod().Name("pod3").Label("key1", "val").SpreadConstraint(1, "node", v1.DoNotSchedule, st.MakeLabelSelector().Exists("key1").Obj(), ptr.To(int32(3)), nil, nil, nil).Container("image").Obj(),
739+
st.MakePod().Name("pod4").Label("key1", "val").SpreadConstraint(1, "zone", v1.DoNotSchedule, st.MakeLabelSelector().Exists("key1").Obj(), ptr.To(int32(3)), nil, nil, nil).Container("image").Obj(),
740+
},
741+
triggerFn: func(testCtx *testutils.TestContext) error {
742+
// Trigger a Node delete event.
743+
// It should requeue pod4 only because this node only has zone label, and doesn't have node label that pod3 requires.
744+
if err := testCtx.ClientSet.CoreV1().Nodes().Delete(testCtx.Ctx, "fake-node2", metav1.DeleteOptions{}); err != nil {
745+
return fmt.Errorf("failed to delete node: %w", err)
746+
}
747+
return nil
748+
},
749+
wantRequeuedPods: sets.New("pod4"),
750+
enableSchedulingQueueHint: []bool{true},
751+
},
752+
{
753+
name: "Pods with PodTopologySpread should be requeued when a Node with a topology label is deleted (QHint: disabled)",
754+
initialNodes: []*v1.Node{
755+
st.MakeNode().Name("fake-node1").Label("node", "fake-node").Obj(),
756+
st.MakeNode().Name("fake-node2").Label("zone", "fake-node").Obj(),
757+
},
758+
initialPods: []*v1.Pod{
759+
st.MakePod().Name("pod1").Label("key1", "val").SpreadConstraint(1, "node", v1.DoNotSchedule, st.MakeLabelSelector().Exists("key1").Obj(), nil, nil, nil, nil).Container("image").Node("fake-node1").Obj(),
760+
st.MakePod().Name("pod2").Label("key1", "val").SpreadConstraint(1, "zone", v1.DoNotSchedule, st.MakeLabelSelector().Exists("key1").Obj(), nil, nil, nil, nil).Container("image").Node("fake-node2").Obj(),
761+
},
762+
pods: []*v1.Pod{
763+
// - Pod3 and Pod4 will be rejected by the PodTopologySpread plugin.
764+
st.MakePod().Name("pod3").Label("key1", "val").SpreadConstraint(1, "node", v1.DoNotSchedule, st.MakeLabelSelector().Exists("key1").Obj(), ptr.To(int32(3)), nil, nil, nil).Container("image").Obj(),
765+
st.MakePod().Name("pod4").Label("key1", "val").SpreadConstraint(1, "zone", v1.DoNotSchedule, st.MakeLabelSelector().Exists("key1").Obj(), ptr.To(int32(3)), nil, nil, nil).Container("image").Obj(),
766+
},
767+
triggerFn: func(testCtx *testutils.TestContext) error {
768+
// Trigger a Node delete event.
769+
// It should requeue both pod3 and pod4 because PodTopologySpread subscribes to Node/delete events.
770+
if err := testCtx.ClientSet.CoreV1().Nodes().Delete(testCtx.Ctx, "fake-node2", metav1.DeleteOptions{}); err != nil {
771+
return fmt.Errorf("failed to delete node: %w", err)
772+
}
773+
return nil
774+
},
775+
wantRequeuedPods: sets.New("pod3", "pod4"),
776+
enableSchedulingQueueHint: []bool{false},
777+
},
726778
{
727779
name: "Pods with PodTopologySpread should be requeued when a NodeTaint of a Node with a topology label has been updated",
728780
initialNodes: []*v1.Node{
@@ -740,7 +792,7 @@ func TestCoreResourceEnqueue(t *testing.T) {
740792
st.MakePod().Name("pod4").Label("key1", "val").SpreadConstraint(1, "zone", v1.DoNotSchedule, st.MakeLabelSelector().Exists("key1").Obj(), ptr.To(int32(3)), nil, nil, nil).Container("image").Toleration("aaa").Obj(),
741793
},
742794
triggerFn: func(testCtx *testutils.TestContext) error {
743-
// Trigger an assigned NodeTaint update event.
795+
// Trigger a NodeTaint update event.
744796
// It should requeue pod4 only because this node only has zone label, and doesn't have node label that pod3 requires.
745797
node := st.MakeNode().Name("fake-node3").Label("zone", "fake-node").Taints([]v1.Taint{{Key: "aaa", Value: "bbb", Effect: v1.TaintEffectNoSchedule}}).Obj()
746798
if _, err := testCtx.ClientSet.CoreV1().Nodes().Update(testCtx.Ctx, node, metav1.UpdateOptions{}); err != nil {

0 commit comments

Comments
 (0)