Skip to content

Commit 27a8ef8

Browse files
committed
add integration test for nodeports in requeueing scenarios
1 parent 2ca0eb3 commit 27a8ef8

File tree

1 file changed

+63
-9
lines changed

1 file changed

+63
-9
lines changed

test/integration/scheduler/queue_test.go

Lines changed: 63 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -190,8 +190,10 @@ func TestCoreResourceEnqueue(t *testing.T) {
190190
name string
191191
// initialNode is the Node to be created at first.
192192
initialNodes []*v1.Node
193-
// initialPod is the Pod to be created at first if it's not empty.
194-
initialPod *v1.Pod
193+
// initialPods is the list of Pods to be created at first if it's not empty.
194+
// Note that the scheduler won't schedule those Pods;
195+
// they are expected to be already scheduled, i.e., they should have .spec.nodeName set.
196+
initialPods []*v1.Pod
195197
// pods are the list of Pods to be created.
196198
// All of them are expected to be unschedulable at first.
197199
pods []*v1.Pod
@@ -227,7 +229,9 @@ func TestCoreResourceEnqueue(t *testing.T) {
227229
{
228230
name: "Pod rejected by the PodAffinity plugin is requeued when a new Node is created and turned to ready",
229231
initialNodes: []*v1.Node{st.MakeNode().Name("fake-node").Label("node", "fake-node").Capacity(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).Obj()},
230-
initialPod: st.MakePod().Label("anti", "anti").Name("pod1").PodAntiAffinityExists("anti", "node", st.PodAntiAffinityWithRequiredReq).Container("image").Node("fake-node").Obj(),
232+
initialPods: []*v1.Pod{
233+
st.MakePod().Label("anti", "anti").Name("pod1").PodAntiAffinityExists("anti", "node", st.PodAntiAffinityWithRequiredReq).Container("image").Node("fake-node").Obj(),
234+
},
231235
pods: []*v1.Pod{
232236
// - Pod2 will be rejected by the PodAffinity plugin.
233237
st.MakePod().Label("anti", "anti").Name("pod2").PodAntiAffinityExists("anti", "node", st.PodAntiAffinityWithRequiredReq).Container("image").Obj(),
@@ -381,7 +385,9 @@ func TestCoreResourceEnqueue(t *testing.T) {
381385
{
382386
name: "Pods with PodTopologySpread should be requeued when a Pod with matching label is scheduled",
383387
initialNodes: []*v1.Node{st.MakeNode().Name("fake-node").Label("node", "fake-node").Capacity(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).Obj()},
384-
initialPod: st.MakePod().Name("pod1").Label("key", "val").Container("image").Node("fake-node").Obj(),
388+
initialPods: []*v1.Pod{
389+
st.MakePod().Name("pod1").Label("key", "val").Container("image").Node("fake-node").Obj(),
390+
},
385391
pods: []*v1.Pod{
386392
// - Pod2 will be rejected by the PodTopologySpread plugin.
387393
st.MakePod().Name("pod2").Label("key", "val").SpreadConstraint(1, "node", v1.DoNotSchedule, st.MakeLabelSelector().Exists("key").Obj(), ptr.To(int32(3)), nil, nil, nil).Container("image").Obj(),
@@ -401,7 +407,9 @@ func TestCoreResourceEnqueue(t *testing.T) {
401407
{
402408
name: "Pod rejected by the PodAffinity plugin is requeued when deleting the existed pod's label to make it match the podAntiAffinity",
403409
initialNodes: []*v1.Node{st.MakeNode().Name("fake-node").Label("node", "fake-node").Obj()},
404-
initialPod: st.MakePod().Name("pod1").Label("anti1", "anti1").Label("anti2", "anti2").Container("image").Node("fake-node").Obj(),
410+
initialPods: []*v1.Pod{
411+
st.MakePod().Name("pod1").Label("anti1", "anti1").Label("anti2", "anti2").Container("image").Node("fake-node").Obj(),
412+
},
405413
pods: []*v1.Pod{
406414
// - Pod2 and pod3 will be rejected by the PodAffinity plugin.
407415
st.MakePod().Name("pod2").Label("anti1", "anti1").PodAntiAffinityExists("anti1", "node", st.PodAntiAffinityWithRequiredReq).Container("image").Obj(),
@@ -421,7 +429,9 @@ func TestCoreResourceEnqueue(t *testing.T) {
421429
{
422430
name: "Pod rejected by the PodAffinity plugin is requeued when updating the existed pod's label to make it match the pod's podAffinity",
423431
initialNodes: []*v1.Node{st.MakeNode().Name("fake-node").Label("node", "fake-node").Obj()},
424-
initialPod: st.MakePod().Name("pod1").Container("image").Node("fake-node").Obj(),
432+
initialPods: []*v1.Pod{
433+
st.MakePod().Name("pod1").Container("image").Node("fake-node").Obj(),
434+
},
425435
pods: []*v1.Pod{
426436
// - Pod2 and pod3 will be rejected by the PodAffinity plugin.
427437
st.MakePod().Name("pod2").PodAffinityExists("aaa", "node", st.PodAffinityWithRequiredReq).Container("image").Obj(),
@@ -458,6 +468,50 @@ func TestCoreResourceEnqueue(t *testing.T) {
458468
wantRequeuedPods: sets.New("pod1"),
459469
enableSchedulingQueueHint: []bool{true},
460470
},
471+
{
472+
name: "Pod rejected with hostport by the NodePorts plugin is requeued when pod with common hostport is deleted",
473+
initialNodes: []*v1.Node{st.MakeNode().Name("fake-node").Label("node", "fake-node").Obj()},
474+
initialPods: []*v1.Pod{
475+
st.MakePod().Name("pod1").Container("image").ContainerPort([]v1.ContainerPort{{ContainerPort: 8080, HostPort: 8080}}).Node("fake-node").Obj(),
476+
st.MakePod().Name("pod2").Container("image").ContainerPort([]v1.ContainerPort{{ContainerPort: 8080, HostPort: 8081}}).Node("fake-node").Obj(),
477+
},
478+
pods: []*v1.Pod{
479+
st.MakePod().Name("pod3").Container("image").ContainerPort([]v1.ContainerPort{{ContainerPort: 8080, HostPort: 8080}}).Obj(),
480+
st.MakePod().Name("pod4").Container("image").ContainerPort([]v1.ContainerPort{{ContainerPort: 8080, HostPort: 8081}}).Obj(),
481+
},
482+
triggerFn: func(testCtx *testutils.TestContext) error {
483+
// Trigger an assigned Pod delete event.
484+
// Because Pod1 and Pod3 use the same host port, deleting Pod1 makes Pod3 schedulable.
485+
// GracePeriodSeconds is set to 0 so that Pod1 is deleted immediately, allowing Pod3 to be requeued right away.
486+
if err := testCtx.ClientSet.CoreV1().Pods(testCtx.NS.Name).Delete(testCtx.Ctx, "pod1", metav1.DeleteOptions{GracePeriodSeconds: new(int64)}); err != nil {
487+
return fmt.Errorf("failed to delete Pod: %w", err)
488+
}
489+
return nil
490+
},
491+
wantRequeuedPods: sets.New("pod3"),
492+
enableSchedulingQueueHint: []bool{true},
493+
},
494+
{
495+
name: "Pod rejected with hostport by the NodePorts plugin is requeued when new node is created",
496+
initialNodes: []*v1.Node{st.MakeNode().Name("fake-node").Label("node", "fake-node").Obj()},
497+
initialPods: []*v1.Pod{
498+
st.MakePod().Name("pod1").Container("image").ContainerPort([]v1.ContainerPort{{ContainerPort: 8080, HostPort: 8080}}).Node("fake-node").Obj(),
499+
},
500+
pods: []*v1.Pod{
501+
st.MakePod().Name("pod2").Container("image").ContainerPort([]v1.ContainerPort{{ContainerPort: 8080, HostPort: 8080}}).Obj(),
502+
},
503+
triggerFn: func(testCtx *testutils.TestContext) error {
504+
// Trigger a NodeCreated event.
505+
// It makes Pod2 schedulable.
506+
node := st.MakeNode().Name("fake-node2").Label("node", "fake-node2").Obj()
507+
if _, err := testCtx.ClientSet.CoreV1().Nodes().Create(testCtx.Ctx, node, metav1.CreateOptions{}); err != nil {
508+
return fmt.Errorf("failed to create a new node: %w", err)
509+
}
510+
return nil
511+
},
512+
wantRequeuedPods: sets.New("pod2"),
513+
enableSchedulingQueueHint: []bool{true},
514+
},
461515
}
462516

463517
for _, tt := range tests {
@@ -488,9 +542,9 @@ func TestCoreResourceEnqueue(t *testing.T) {
488542
}
489543
}
490544

491-
if tt.initialPod != nil {
492-
if _, err := cs.CoreV1().Pods(ns).Create(ctx, tt.initialPod, metav1.CreateOptions{}); err != nil {
493-
t.Fatalf("Failed to create an initial Pod %q: %v", tt.initialPod.Name, err)
545+
for _, pod := range tt.initialPods {
546+
if _, err := cs.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{}); err != nil {
547+
t.Fatalf("Failed to create an initial Pod %q: %v", pod.Name, err)
494548
}
495549
}
496550

0 commit comments

Comments
 (0)