Skip to content

Commit f9f2747

Browse files
committed
feature: add more integration test cases for qos sort plugin
1 parent 1a27c63 commit f9f2747

File tree

1 file changed

+230
-122
lines changed

1 file changed

+230
-122
lines changed

test/integration/qos_test.go

Lines changed: 230 additions & 122 deletions
Original file line numberDiff line numberDiff line change
@@ -42,132 +42,240 @@ import (
4242
"sigs.k8s.io/scheduler-plugins/test/util"
4343
)
4444

45-
func TestQOSPlugin(t *testing.T) {
46-
testCtx := &testContext{}
47-
testCtx.Ctx, testCtx.CancelFn = context.WithCancel(context.Background())
48-
49-
cs := kubernetes.NewForConfigOrDie(globalKubeConfig)
50-
testCtx.ClientSet = cs
51-
testCtx.KubeConfig = globalKubeConfig
52-
53-
cfg, err := util.NewDefaultSchedulerComponentConfig()
54-
if err != nil {
55-
t.Fatal(err)
56-
}
57-
cfg.Profiles[0].Plugins.QueueSort = schedapi.PluginSet{
58-
Enabled: []schedapi.Plugin{{Name: qos.Name}},
59-
Disabled: []schedapi.Plugin{{Name: "*"}},
60-
}
61-
62-
testCtx = initTestSchedulerWithOptions(
63-
t,
64-
testCtx,
65-
scheduler.WithProfiles(cfg.Profiles...),
66-
scheduler.WithFrameworkOutOfTreeRegistry(fwkruntime.Registry{qos.Name: qos.New}),
67-
)
68-
syncInformerFactory(testCtx)
69-
// Do not start the scheduler.
70-
// go testCtx.Scheduler.Run(testCtx.Ctx)
71-
defer cleanupTest(t, testCtx)
72-
73-
ns := fmt.Sprintf("integration-test-%v", string(uuid.NewUUID()))
74-
createNamespace(t, testCtx, ns)
75-
76-
// Create a Node.
77-
nodeName := "fake-node"
78-
node := st.MakeNode().Name("fake-node").Label("node", nodeName).Obj()
79-
node.Status.Capacity = v1.ResourceList{
80-
v1.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
81-
v1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI),
82-
v1.ResourceMemory: *resource.NewQuantity(500, resource.DecimalSI),
83-
}
84-
node, err = cs.CoreV1().Nodes().Create(testCtx.Ctx, node, metav1.CreateOptions{})
85-
if err != nil {
86-
t.Fatalf("Failed to create Node %q: %v", nodeName, err)
87-
}
88-
89-
// Create 3 Pods.
90-
var pods []*v1.Pod
91-
podNames := []string{"bestefforts", "burstable", "guaranteed"}
92-
pause := imageutils.GetPauseImageName()
93-
for i := 0; i < len(podNames); i++ {
94-
pod := st.MakePod().Namespace(ns).Name(podNames[i]).Container(pause).Obj()
95-
pods = append(pods, pod)
96-
}
97-
// Make pods[0] BestEfforts (i.e., do nothing).
98-
// Make pods[1] Burstable.
99-
pods[1].Spec.Containers[0].Resources = v1.ResourceRequirements{
100-
Requests: v1.ResourceList{
101-
v1.ResourceCPU: *resource.NewMilliQuantity(50, resource.DecimalSI),
102-
v1.ResourceMemory: *resource.NewQuantity(100, resource.DecimalSI),
103-
},
104-
Limits: v1.ResourceList{
105-
v1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI),
106-
v1.ResourceMemory: *resource.NewQuantity(200, resource.DecimalSI),
107-
},
108-
}
109-
// Make pods[2] Guaranteed.
110-
pods[2].Spec.Containers[0].Resources = v1.ResourceRequirements{
111-
Requests: v1.ResourceList{
112-
v1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI),
113-
v1.ResourceMemory: *resource.NewQuantity(100, resource.DecimalSI),
114-
},
115-
Limits: v1.ResourceList{
116-
v1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI),
117-
v1.ResourceMemory: *resource.NewQuantity(100, resource.DecimalSI),
118-
},
119-
}
120-
121-
// Create 3 Pods with the order: BestEfforts, Burstable, Guaranteed.
122-
// We will expect them to be scheduled in a reversed order.
123-
t.Logf("Start to create 3 Pods.")
124-
// Concurrently create all Pods.
125-
var wg sync.WaitGroup
126-
for _, pod := range pods {
127-
wg.Add(1)
128-
go func(p *v1.Pod) {
129-
defer wg.Done()
130-
_, err = cs.CoreV1().Pods(ns).Create(testCtx.Ctx, p, metav1.CreateOptions{})
45+
func TestQOSPluginSuite(t *testing.T) {
46+
t.Run("DifferentQoS", func(t *testing.T) {
47+
testCtx := &testContext{}
48+
testCtx.Ctx, testCtx.CancelFn = context.WithCancel(context.Background())
49+
50+
cs := kubernetes.NewForConfigOrDie(globalKubeConfig)
51+
testCtx.ClientSet = cs
52+
testCtx.KubeConfig = globalKubeConfig
53+
54+
cfg, err := util.NewDefaultSchedulerComponentConfig()
55+
if err != nil {
56+
t.Fatal(err)
57+
}
58+
cfg.Profiles[0].Plugins.QueueSort = schedapi.PluginSet{
59+
Enabled: []schedapi.Plugin{{Name: qos.Name}},
60+
Disabled: []schedapi.Plugin{{Name: "*"}},
61+
}
62+
63+
testCtx = initTestSchedulerWithOptions(
64+
t,
65+
testCtx,
66+
scheduler.WithProfiles(cfg.Profiles...),
67+
scheduler.WithFrameworkOutOfTreeRegistry(fwkruntime.Registry{qos.Name: qos.New}),
68+
)
69+
syncInformerFactory(testCtx)
70+
// Do not start the scheduler.
71+
// go testCtx.Scheduler.Run(testCtx.Ctx)
72+
defer cleanupTest(t, testCtx)
73+
74+
ns := fmt.Sprintf("integration-test-%v", string(uuid.NewUUID()))
75+
createNamespace(t, testCtx, ns)
76+
77+
// Create a Node.
78+
nodeName := "fake-node"
79+
node := st.MakeNode().Name("fake-node").Label("node", nodeName).Obj()
80+
node.Status.Capacity = v1.ResourceList{
81+
v1.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
82+
v1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI),
83+
v1.ResourceMemory: *resource.NewQuantity(500, resource.DecimalSI),
84+
}
85+
node, err = cs.CoreV1().Nodes().Create(testCtx.Ctx, node, metav1.CreateOptions{})
86+
if err != nil {
87+
t.Fatalf("Failed to create Node %q: %v", nodeName, err)
88+
}
89+
90+
// Create 3 Pods.
91+
var pods []*v1.Pod
92+
podNames := []string{"bestefforts", "burstable", "guaranteed"}
93+
pause := imageutils.GetPauseImageName()
94+
for i := 0; i < len(podNames); i++ {
95+
pod := st.MakePod().Namespace(ns).Name(podNames[i]).Container(pause).Obj()
96+
pods = append(pods, pod)
97+
}
98+
// Make pods[0] BestEfforts (i.e., do nothing).
99+
// Make pods[1] Burstable.
100+
pods[1].Spec.Containers[0].Resources = v1.ResourceRequirements{
101+
Requests: v1.ResourceList{
102+
v1.ResourceCPU: *resource.NewMilliQuantity(50, resource.DecimalSI),
103+
v1.ResourceMemory: *resource.NewQuantity(100, resource.DecimalSI),
104+
},
105+
Limits: v1.ResourceList{
106+
v1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI),
107+
v1.ResourceMemory: *resource.NewQuantity(200, resource.DecimalSI),
108+
},
109+
}
110+
// Make pods[2] Guaranteed.
111+
pods[2].Spec.Containers[0].Resources = v1.ResourceRequirements{
112+
Requests: v1.ResourceList{
113+
v1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI),
114+
v1.ResourceMemory: *resource.NewQuantity(100, resource.DecimalSI),
115+
},
116+
Limits: v1.ResourceList{
117+
v1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI),
118+
v1.ResourceMemory: *resource.NewQuantity(100, resource.DecimalSI),
119+
},
120+
}
121+
122+
// Create 3 Pods with the order: BestEfforts, Burstable, Guaranteed.
123+
// We will expect them to be scheduled in a reversed order.
124+
t.Logf("Start to create 3 Pods.")
125+
// Concurrently create all Pods.
126+
var wg sync.WaitGroup
127+
for _, pod := range pods {
128+
wg.Add(1)
129+
go func(p *v1.Pod) {
130+
defer wg.Done()
131+
_, err = cs.CoreV1().Pods(ns).Create(testCtx.Ctx, p, metav1.CreateOptions{})
132+
if err != nil {
133+
t.Errorf("Failed to create Pod %q: %v", p.Name, err)
134+
} else {
135+
t.Logf("Created Pod %q", p.Name)
136+
}
137+
}(pod)
138+
}
139+
wg.Wait()
140+
defer cleanupPods(t, testCtx, pods)
141+
142+
// Wait for all Pods are in the scheduling queue.
143+
err = wait.PollUntilContextTimeout(testCtx.Ctx, time.Millisecond*200, wait.ForeverTestTimeout, false, func(ctx context.Context) (bool, error) {
144+
pendingPods, _ := testCtx.Scheduler.SchedulingQueue.PendingPods()
145+
if len(pendingPods) == len(pods) {
146+
// Collect Pod names into a slice.
147+
podNames := make([]string, len(pendingPods))
148+
for i, podInfo := range pendingPods {
149+
podNames[i] = podInfo.Name
150+
}
151+
t.Logf("All Pods are in the pending queue: %v", strings.Join(podNames, ", "))
152+
return true, nil
153+
}
154+
return false, nil
155+
})
156+
if err != nil {
157+
t.Fatal(err)
158+
}
159+
160+
// Expect Pods are popped in the QoS class order.
161+
logger := klog.FromContext(testCtx.Ctx)
162+
expectedOrder := []string{"guaranteed", "burstable", "bestefforts"}
163+
actualOrder := make([]string, len(expectedOrder))
164+
for i := 0; i < len(expectedOrder); i++ {
165+
podInfo, _ := testCtx.Scheduler.NextPod(logger)
166+
actualOrder[i] = podInfo.Pod.Name
167+
t.Logf("Popped Pod %q", podInfo.Pod.Name)
168+
}
169+
if !reflect.DeepEqual(actualOrder, expectedOrder) {
170+
t.Errorf("Expected Pod order %v, but got %v", expectedOrder, actualOrder)
171+
} else {
172+
t.Logf("Pods were popped out in the expected order.")
173+
}
174+
})
175+
t.Run("SameQoSDifferentCreationTime", func(t *testing.T) {
176+
testCtx := &testContext{}
177+
testCtx.Ctx, testCtx.CancelFn = context.WithCancel(context.Background())
178+
179+
cs := kubernetes.NewForConfigOrDie(globalKubeConfig)
180+
testCtx.ClientSet = cs
181+
testCtx.KubeConfig = globalKubeConfig
182+
183+
cfg, err := util.NewDefaultSchedulerComponentConfig()
184+
if err != nil {
185+
t.Fatal(err)
186+
}
187+
cfg.Profiles[0].Plugins.QueueSort = schedapi.PluginSet{
188+
Enabled: []schedapi.Plugin{{Name: qos.Name}},
189+
Disabled: []schedapi.Plugin{{Name: "*"}},
190+
}
191+
192+
testCtx = initTestSchedulerWithOptions(
193+
t,
194+
testCtx,
195+
scheduler.WithProfiles(cfg.Profiles...),
196+
scheduler.WithFrameworkOutOfTreeRegistry(fwkruntime.Registry{qos.Name: qos.New}),
197+
)
198+
syncInformerFactory(testCtx)
199+
defer cleanupTest(t, testCtx)
200+
201+
ns := fmt.Sprintf("integration-test-same-qos-%v", string(uuid.NewUUID()))
202+
createNamespace(t, testCtx, ns)
203+
204+
// Create a Node.
205+
nodeName := "fake-node"
206+
node := st.MakeNode().Name(nodeName).Label("node", nodeName).Obj()
207+
node.Status.Capacity = v1.ResourceList{
208+
v1.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
209+
v1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI),
210+
v1.ResourceMemory: *resource.NewQuantity(500, resource.DecimalSI),
211+
}
212+
node, err = cs.CoreV1().Nodes().Create(testCtx.Ctx, node, metav1.CreateOptions{})
213+
if err != nil {
214+
t.Fatalf("Failed to create Node %q: %v", nodeName, err)
215+
}
216+
217+
// Create 3 Pods with the same QoS class (Guaranteed) but different creation times.
218+
var pods []*v1.Pod
219+
podNames := []string{"guaranteed-1", "guaranteed-2", "guaranteed-3"}
220+
pause := imageutils.GetPauseImageName()
221+
for i := 0; i < len(podNames); i++ {
222+
pod := st.MakePod().Namespace(ns).Name(podNames[i]).Container(pause).Obj()
223+
pod.Spec.Containers[0].Resources = v1.ResourceRequirements{
224+
Requests: v1.ResourceList{
225+
v1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI),
226+
v1.ResourceMemory: *resource.NewQuantity(100, resource.DecimalSI),
227+
},
228+
Limits: v1.ResourceList{
229+
v1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI),
230+
v1.ResourceMemory: *resource.NewQuantity(100, resource.DecimalSI),
231+
},
232+
}
233+
pods = append(pods, pod)
234+
}
235+
236+
// Create Pods sequentially with a delay between each creation to ensure different creation timestamps.
237+
t.Logf("Start to create 3 Guaranteed Pods sequentially.")
238+
for _, pod := range pods {
239+
_, err = cs.CoreV1().Pods(ns).Create(testCtx.Ctx, pod, metav1.CreateOptions{})
131240
if err != nil {
132-
t.Errorf("Failed to create Pod %q: %v", p.Name, err)
241+
t.Errorf("Failed to create Pod %q: %v", pod.Name, err)
133242
} else {
134-
t.Logf("Created Pod %q", p.Name)
243+
t.Logf("Created Pod %q", pod.Name)
135244
}
136-
}(pod)
137-
}
138-
wg.Wait()
139-
defer cleanupPods(t, testCtx, pods)
140-
141-
// Wait for all Pods are in the scheduling queue.
142-
err = wait.PollUntilContextTimeout(testCtx.Ctx, time.Millisecond*200, wait.ForeverTestTimeout, false, func(ctx context.Context) (bool, error) {
143-
pendingPods, _ := testCtx.Scheduler.SchedulingQueue.PendingPods()
144-
if len(pendingPods) == len(pods) {
145-
// Collect Pod names into a slice.
146-
podNames := make([]string, len(pendingPods))
147-
for i, podInfo := range pendingPods {
148-
podNames[i] = podInfo.Name
245+
}
246+
defer cleanupPods(t, testCtx, pods)
247+
248+
// Wait for all Pods are in the scheduling queue.
249+
err = wait.PollUntilContextTimeout(testCtx.Ctx, time.Millisecond*200, wait.ForeverTestTimeout, false, func(ctx context.Context) (bool, error) {
250+
pendingPods, _ := testCtx.Scheduler.SchedulingQueue.PendingPods()
251+
if len(pendingPods) == len(pods) {
252+
// Collect Pod names into a slice.
253+
podNames := make([]string, len(pendingPods))
254+
for i, podInfo := range pendingPods {
255+
podNames[i] = podInfo.Name
256+
}
257+
t.Logf("All Pods are in the pending queue: %v", strings.Join(podNames, ", "))
258+
return true, nil
149259
}
150-
t.Logf("All Pods are in the pending queue: %v", strings.Join(podNames, ", "))
151-
return true, nil
260+
return false, nil
261+
})
262+
if err != nil {
263+
t.Fatal(err)
264+
}
265+
266+
// Expect Pods are popped in the order of their creation time (earliest first).
267+
logger := klog.FromContext(testCtx.Ctx)
268+
expectedOrder := podNames
269+
actualOrder := make([]string, len(expectedOrder))
270+
for i := 0; i < len(expectedOrder); i++ {
271+
podInfo, _ := testCtx.Scheduler.NextPod(logger)
272+
actualOrder[i] = podInfo.Pod.Name
273+
t.Logf("Popped Pod %q", podInfo.Pod.Name)
274+
}
275+
if !reflect.DeepEqual(actualOrder, expectedOrder) {
276+
t.Errorf("Expected Pod order %v, but got %v", expectedOrder, actualOrder)
277+
} else {
278+
t.Logf("Pods were popped out in the expected order based on creation time.")
152279
}
153-
return false, nil
154280
})
155-
if err != nil {
156-
t.Fatal(err)
157-
}
158-
159-
// Expect Pods are popped in the QoS class order.
160-
logger := klog.FromContext(testCtx.Ctx)
161-
expectedOrder := []string{"guaranteed", "burstable", "bestefforts"}
162-
actualOrder := make([]string, len(expectedOrder))
163-
for i := 0; i < len(expectedOrder); i++ {
164-
podInfo, _ := testCtx.Scheduler.NextPod(logger)
165-
actualOrder[i] = podInfo.Pod.Name
166-
t.Logf("Popped Pod %q", podInfo.Pod.Name)
167-
}
168-
if !reflect.DeepEqual(actualOrder, expectedOrder) {
169-
t.Errorf("Expected Pod order %v, but got %v", expectedOrder, actualOrder)
170-
} else {
171-
t.Logf("Pods were popped out in the expected order.")
172-
}
173281
}

0 commit comments

Comments
 (0)