Skip to content

Commit dddffaa

Browse files
authored
Merge pull request kubernetes#127399 from saku3/add_topologyspread_test
add integration test for podtopologyspread in requeueing scenarios
2 parents 2850d30 + b8bd6eb commit dddffaa

File tree

1 file changed

+154
-21
lines changed

1 file changed

+154
-21
lines changed

test/integration/scheduler/queue_test.go

Lines changed: 154 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -378,27 +378,6 @@ func TestCoreResourceEnqueue(t *testing.T) {
378378
return nil
379379
},
380380
},
381-
{
382-
name: "Pods with PodTopologySpread should be requeued when a Pod with matching label is scheduled",
383-
initialNodes: []*v1.Node{st.MakeNode().Name("fake-node").Label("node", "fake-node").Capacity(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).Obj()},
384-
initialPods: []*v1.Pod{
385-
st.MakePod().Name("pod1").Label("key", "val").Container("image").Node("fake-node").Obj(),
386-
},
387-
pods: []*v1.Pod{
388-
// - Pod2 will be rejected by the PodTopologySpread plugin.
389-
st.MakePod().Name("pod2").Label("key", "val").SpreadConstraint(1, "node", v1.DoNotSchedule, st.MakeLabelSelector().Exists("key").Obj(), ptr.To(int32(3)), nil, nil, nil).Container("image").Obj(),
390-
},
391-
triggerFn: func(testCtx *testutils.TestContext) error {
392-
// Trigger an assigned Pod add event.
393-
pod := st.MakePod().Name("pod3").Label("key", "val").Node("fake-node").Container("image").Obj()
394-
if _, err := testCtx.ClientSet.CoreV1().Pods(testCtx.NS.Name).Create(testCtx.Ctx, pod, metav1.CreateOptions{}); err != nil {
395-
return fmt.Errorf("failed to create Pod %q: %w", pod.Name, err)
396-
}
397-
398-
return nil
399-
},
400-
wantRequeuedPods: sets.New("pod2"),
401-
},
402381
{
403382
name: "Pod rejected by the PodAffinity plugin is requeued when deleting the existed pod's label to make it match the podAntiAffinity",
404383
initialNodes: []*v1.Node{st.MakeNode().Name("fake-node").Label("node", "fake-node").Obj()},
@@ -507,6 +486,160 @@ func TestCoreResourceEnqueue(t *testing.T) {
507486
wantRequeuedPods: sets.New("pod2"),
508487
enableSchedulingQueueHint: []bool{true},
509488
},
489+
{
490+
name: "Pods with PodTopologySpread should be requeued when a Pod with matching label is scheduled",
491+
initialNodes: []*v1.Node{st.MakeNode().Name("fake-node").Label("node", "fake-node").Capacity(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).Obj()},
492+
initialPods: []*v1.Pod{
493+
st.MakePod().Name("pod1").Label("key1", "val").Container("image").Node("fake-node").Obj(),
494+
st.MakePod().Name("pod2").Label("key2", "val").Container("image").Node("fake-node").Obj(),
495+
},
496+
pods: []*v1.Pod{
497+
// - Pod3 and Pod4 will be rejected by the PodTopologySpread plugin.
498+
st.MakePod().Name("pod3").Label("key1", "val").SpreadConstraint(1, "node", v1.DoNotSchedule, st.MakeLabelSelector().Exists("key1").Obj(), ptr.To(int32(3)), nil, nil, nil).Container("image").Obj(),
499+
st.MakePod().Name("pod4").Label("key2", "val").SpreadConstraint(1, "node", v1.DoNotSchedule, st.MakeLabelSelector().Exists("key2").Obj(), ptr.To(int32(3)), nil, nil, nil).Container("image").Obj(),
500+
},
501+
triggerFn: func(testCtx *testutils.TestContext) error {
502+
// Trigger an assigned Pod add event.
503+
// It should requeue pod3 only because the new pod only has the key1 label that pod3's topology spread selects, and doesn't have the key2 label that pod4's selects.
504+
pod := st.MakePod().Name("pod5").Label("key1", "val").Node("fake-node").Container("image").Obj()
505+
if _, err := testCtx.ClientSet.CoreV1().Pods(testCtx.NS.Name).Create(testCtx.Ctx, pod, metav1.CreateOptions{}); err != nil {
506+
return fmt.Errorf("failed to create Pod %q: %w", pod.Name, err)
507+
}
508+
509+
return nil
510+
},
511+
wantRequeuedPods: sets.New("pod3"),
512+
enableSchedulingQueueHint: []bool{true},
513+
},
514+
{
515+
name: "Pods with PodTopologySpread should be requeued when a scheduled Pod label is updated to match the selector",
516+
initialNodes: []*v1.Node{st.MakeNode().Name("fake-node").Label("node", "fake-node").Capacity(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).Obj()},
517+
initialPods: []*v1.Pod{
518+
st.MakePod().Name("pod1").Label("key1", "val").Container("image").Node("fake-node").Obj(),
519+
st.MakePod().Name("pod2").Label("key2", "val").Container("image").Node("fake-node").Obj(),
520+
},
521+
pods: []*v1.Pod{
522+
// - Pod3 and Pod4 will be rejected by the PodTopologySpread plugin.
523+
st.MakePod().Name("pod3").Label("key1", "val").SpreadConstraint(1, "node", v1.DoNotSchedule, st.MakeLabelSelector().Exists("key1").Obj(), ptr.To(int32(3)), nil, nil, nil).Container("image").Obj(),
524+
st.MakePod().Name("pod4").Label("key2", "val").SpreadConstraint(1, "node", v1.DoNotSchedule, st.MakeLabelSelector().Exists("key2").Obj(), ptr.To(int32(3)), nil, nil, nil).Container("image").Obj(),
525+
},
526+
triggerFn: func(testCtx *testutils.TestContext) error {
527+
// Trigger an assigned Pod update event.
528+
// It should requeue pod3 only because this updated pod had key1 label,
529+
// and it's related only to the label selector that pod3's topologyspread has.
530+
if _, err := testCtx.ClientSet.CoreV1().Pods(testCtx.NS.Name).Update(testCtx.Ctx, st.MakePod().Name("pod1").Label("key3", "val").Container("image").Node("fake-node").Obj(), metav1.UpdateOptions{}); err != nil {
531+
return fmt.Errorf("failed to update the pod: %w", err)
532+
}
533+
return nil
534+
},
535+
wantRequeuedPods: sets.New("pod3"),
536+
enableSchedulingQueueHint: []bool{true},
537+
},
538+
{
539+
name: "Pods with PodTopologySpread should be requeued when a scheduled Pod with matching label is deleted",
540+
initialNodes: []*v1.Node{st.MakeNode().Name("fake-node").Label("node", "fake-node").Capacity(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).Obj()},
541+
initialPods: []*v1.Pod{
542+
st.MakePod().Name("pod1").Label("key1", "val").Container("image").Node("fake-node").Obj(),
543+
st.MakePod().Name("pod2").Label("key2", "val").Container("image").Node("fake-node").Obj(),
544+
},
545+
pods: []*v1.Pod{
546+
// - Pod3 and Pod4 will be rejected by the PodTopologySpread plugin.
547+
st.MakePod().Name("pod3").Label("key1", "val").SpreadConstraint(1, "node", v1.DoNotSchedule, st.MakeLabelSelector().Exists("key1").Obj(), ptr.To(int32(2)), nil, nil, nil).Container("image").Obj(),
548+
st.MakePod().Name("pod4").Label("key2", "val").SpreadConstraint(1, "node", v1.DoNotSchedule, st.MakeLabelSelector().Exists("key2").Obj(), ptr.To(int32(3)), nil, nil, nil).Container("image").Obj(),
549+
},
550+
triggerFn: func(testCtx *testutils.TestContext) error {
551+
// Trigger an assigned Pod delete event.
552+
// It should requeue pod3 only because this pod only has key1 label that pod3's topologyspread checks, and doesn't have key2 label that pod4's one does.
553+
if err := testCtx.ClientSet.CoreV1().Pods(testCtx.NS.Name).Delete(testCtx.Ctx, "pod1", metav1.DeleteOptions{GracePeriodSeconds: new(int64)}); err != nil {
554+
return fmt.Errorf("failed to delete Pod: %w", err)
555+
}
556+
return nil
557+
},
558+
wantRequeuedPods: sets.New("pod3"),
559+
enableSchedulingQueueHint: []bool{true},
560+
},
561+
{
562+
name: "Pods with PodTopologySpread should be requeued when a Node with topology label is created",
563+
initialNodes: []*v1.Node{
564+
st.MakeNode().Name("fake-node1").Label("node", "fake-node").Obj(),
565+
st.MakeNode().Name("fake-node2").Label("zone", "fake-zone").Obj(),
566+
},
567+
initialPods: []*v1.Pod{
568+
st.MakePod().Name("pod1").Label("key1", "val").Container("image").Node("fake-node1").Obj(),
569+
st.MakePod().Name("pod2").Label("key1", "val").Container("image").Node("fake-node2").Obj(),
570+
},
571+
pods: []*v1.Pod{
572+
// - Pod3 and Pod4 will be rejected by the PodTopologySpread plugin.
573+
st.MakePod().Name("pod3").Label("key1", "val").SpreadConstraint(1, "node", v1.DoNotSchedule, st.MakeLabelSelector().Exists("key1").Obj(), ptr.To(int32(2)), nil, nil, nil).Container("image").Obj(),
574+
st.MakePod().Name("pod4").Label("key1", "val").SpreadConstraint(1, "zone", v1.DoNotSchedule, st.MakeLabelSelector().Exists("key1").Obj(), ptr.To(int32(2)), nil, nil, nil).Container("image").Obj(),
575+
},
576+
triggerFn: func(testCtx *testutils.TestContext) error {
577+
// Trigger an assigned Node add event.
578+
// It should requeue pod3 only because this node only has node label, and doesn't have zone label that pod4's topologyspread requires.
579+
node := st.MakeNode().Name("fake-node3").Label("node", "fake-node").Obj()
580+
if _, err := testCtx.ClientSet.CoreV1().Nodes().Create(testCtx.Ctx, node, metav1.CreateOptions{}); err != nil {
581+
return fmt.Errorf("failed to create a new node: %w", err)
582+
}
583+
return nil
584+
},
585+
wantRequeuedPods: sets.New("pod3"),
586+
enableSchedulingQueueHint: []bool{true},
587+
},
588+
{
589+
name: "Pods with PodTopologySpread should be requeued when a Node is updated to have the topology label",
590+
initialNodes: []*v1.Node{
591+
st.MakeNode().Name("fake-node1").Label("node", "fake-node").Obj(),
592+
st.MakeNode().Name("fake-node2").Label("node", "fake-node").Obj(),
593+
},
594+
initialPods: []*v1.Pod{
595+
st.MakePod().Name("pod1").Label("key1", "val").SpreadConstraint(1, "node", v1.DoNotSchedule, st.MakeLabelSelector().Exists("key1").Obj(), nil, nil, nil, nil).Container("image").Node("fake-node1").Obj(),
596+
st.MakePod().Name("pod2").Label("key1", "val").SpreadConstraint(1, "zone", v1.DoNotSchedule, st.MakeLabelSelector().Exists("key1").Obj(), nil, nil, nil, nil).Container("image").Node("fake-node2").Obj(),
597+
},
598+
pods: []*v1.Pod{
599+
// - Pod3 and Pod4 will be rejected by the PodTopologySpread plugin.
600+
st.MakePod().Name("pod3").Label("key1", "val").SpreadConstraint(1, "node", v1.DoNotSchedule, st.MakeLabelSelector().Exists("key1").Obj(), ptr.To(int32(3)), nil, nil, nil).Container("image").Obj(),
601+
st.MakePod().Name("pod4").Label("key1", "val").SpreadConstraint(1, "zone", v1.DoNotSchedule, st.MakeLabelSelector().Exists("key1").Obj(), ptr.To(int32(3)), nil, nil, nil).Container("image").Obj(),
602+
},
603+
triggerFn: func(testCtx *testutils.TestContext) error {
604+
// Trigger a Node update event.
605+
// It should requeue pod4 only because this node only has zone label, and doesn't have node label that pod3 requires.
606+
node := st.MakeNode().Name("fake-node2").Label("zone", "fake-node").Obj()
607+
if _, err := testCtx.ClientSet.CoreV1().Nodes().Update(testCtx.Ctx, node, metav1.UpdateOptions{}); err != nil {
608+
return fmt.Errorf("failed to update node: %w", err)
609+
}
610+
return nil
611+
},
612+
wantRequeuedPods: sets.New("pod4"),
613+
enableSchedulingQueueHint: []bool{true},
614+
},
615+
{
616+
name: "Pods with PodTopologySpread should be requeued when a NodeTaint of a Node with a topology label has been updated",
617+
initialNodes: []*v1.Node{
618+
st.MakeNode().Name("fake-node1").Label("node", "fake-node").Obj(),
619+
st.MakeNode().Name("fake-node2").Label("zone", "fake-node").Obj(),
620+
st.MakeNode().Name("fake-node3").Label("zone", "fake-node").Taints([]v1.Taint{{Key: v1.TaintNodeNotReady, Effect: v1.TaintEffectNoSchedule}}).Obj(),
621+
},
622+
initialPods: []*v1.Pod{
623+
st.MakePod().Name("pod1").Label("key1", "val").SpreadConstraint(1, "node", v1.DoNotSchedule, st.MakeLabelSelector().Exists("key1").Obj(), nil, nil, nil, nil).Container("image").Node("fake-node1").Obj(),
624+
st.MakePod().Name("pod2").Label("key1", "val").SpreadConstraint(1, "node", v1.DoNotSchedule, st.MakeLabelSelector().Exists("key1").Obj(), nil, nil, nil, nil).Container("image").Node("fake-node2").Obj(),
625+
},
626+
pods: []*v1.Pod{
627+
// - Pod3 and Pod4 will be rejected by the PodTopologySpread plugin.
628+
st.MakePod().Name("pod3").Label("key1", "val").SpreadConstraint(1, "node", v1.DoNotSchedule, st.MakeLabelSelector().Exists("key1").Obj(), ptr.To(int32(3)), nil, nil, nil).Container("image").Obj(),
629+
st.MakePod().Name("pod4").Label("key1", "val").SpreadConstraint(1, "zone", v1.DoNotSchedule, st.MakeLabelSelector().Exists("key1").Obj(), ptr.To(int32(3)), nil, nil, nil).Container("image").Toleration("aaa").Obj(),
630+
},
631+
triggerFn: func(testCtx *testutils.TestContext) error {
632+
// Trigger a NodeTaint update event.
633+
// It should requeue pod4 only because this node only has zone label, and doesn't have node label that pod3 requires.
634+
node := st.MakeNode().Name("fake-node3").Label("zone", "fake-node").Taints([]v1.Taint{{Key: "aaa", Value: "bbb", Effect: v1.TaintEffectNoSchedule}}).Obj()
635+
if _, err := testCtx.ClientSet.CoreV1().Nodes().Update(testCtx.Ctx, node, metav1.UpdateOptions{}); err != nil {
636+
return fmt.Errorf("failed to update node: %w", err)
637+
}
638+
return nil
639+
},
640+
wantRequeuedPods: sets.New("pod4"),
641+
enableSchedulingQueueHint: []bool{true},
642+
},
510643
}
511644

512645
for _, tt := range tests {

0 commit comments

Comments
 (0)