Commit f6fb0bd

Add integration test for Default PodTopologySpread
Simulating a cluster with 300 nodes across 3 zones, deploying 3, 12, and 27 Pods (cumulative counts) belonging to the same Service.

Change-Id: I16425594012ea7bd24b888acedb12958360bff97
1 parent 161df49 commit f6fb0bd
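For context, "default spreading" here means cluster-level topology spread constraints that the PodTopologySpread plugin applies to Pods that declare no topologySpreadConstraints of their own; the plugin derives the label selector from the Pod's owning Service (or ReplicaSet, ReplicationController, StatefulSet). Below is a minimal sketch of what such defaults can look like; the maxSkew values and the variable name systemDefaultConstraints are illustrative assumptions, not taken from this commit.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// systemDefaultConstraints sketches cluster-level spreading defaults that the
// PodTopologySpread plugin can apply to Pods declaring no constraints of
// their own. The maxSkew values are assumptions for illustration only.
var systemDefaultConstraints = []v1.TopologySpreadConstraint{
	{
		MaxSkew:           3,
		TopologyKey:       v1.LabelHostname, // spread across nodes
		WhenUnsatisfiable: v1.ScheduleAnyway, // influence scoring only; never filter nodes out
	},
	{
		MaxSkew:           5,
		TopologyKey:       v1.LabelZoneFailureDomainStable, // spread across zones
		WhenUnsatisfiable: v1.ScheduleAnyway,
	},
}

func main() {
	for _, c := range systemDefaultConstraints {
		fmt.Printf("spread on %q with maxSkew=%d\n", c.TopologyKey, c.MaxSkew)
	}
}

Because WhenUnsatisfiable is ScheduleAnyway, these defaults act through the Score extension point, which is exactly what the test added by this commit exercises.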

1 file changed (+96 −2 lines)

test/integration/scheduler/priorities_test.go

@@ -25,6 +25,7 @@ import (
 	v1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/intstr"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/kubernetes/pkg/scheduler"
 	schedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config"
@@ -254,8 +255,8 @@ func makeContainersWithImages(images []string) []v1.Container {
 	return containers
 }
 
-// TestEvenPodsSpreadPriority verifies that EvenPodsSpread priority functions well.
-func TestEvenPodsSpreadPriority(t *testing.T) {
+// TestPodTopologySpreadScore verifies that the PodTopologySpread Score plugin works.
+func TestPodTopologySpreadScore(t *testing.T) {
 	testCtx := initTestSchedulerForPriorityTest(t, podtopologyspread.Name)
 	defer testutils.CleanupTest(t, testCtx)
 	cs := testCtx.ClientSet
@@ -361,3 +362,96 @@ func TestEvenPodsSpreadPriority(t *testing.T) {
 		})
 	}
 }
+
+// TestDefaultPodTopologySpreadScore verifies that the PodTopologySpread Score plugin
+// with the system default spreading spreads Pods belonging to a Service.
+// The setup has 300 nodes over 3 zones.
+func TestDefaultPodTopologySpreadScore(t *testing.T) {
+	testCtx := initTestSchedulerForPriorityTest(t, podtopologyspread.Name)
+	t.Cleanup(func() {
+		testutils.CleanupTest(t, testCtx)
+	})
+	cs := testCtx.ClientSet
+	ns := testCtx.NS.Name
+
+	zoneForNode := make(map[string]string)
+	for i := 0; i < 300; i++ {
+		nodeName := fmt.Sprintf("node-%d", i)
+		zone := fmt.Sprintf("zone-%d", i%3)
+		zoneForNode[nodeName] = zone
+		_, err := createNode(cs, st.MakeNode().Name(nodeName).Label(v1.LabelHostname, nodeName).Label(v1.LabelZoneFailureDomainStable, zone).Obj())
+		if err != nil {
+			t.Fatalf("Cannot create node: %v", err)
+		}
+	}
+
+	serviceName := "test-service"
+	svc := &v1.Service{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      serviceName,
+			Namespace: ns,
+		},
+		Spec: v1.ServiceSpec{
+			Selector: map[string]string{
+				"service": serviceName,
+			},
+			Ports: []v1.ServicePort{{
+				Port:       80,
+				TargetPort: intstr.FromInt(80),
+			}},
+		},
+	}
+	_, err := cs.CoreV1().Services(ns).Create(testCtx.Ctx, svc, metav1.CreateOptions{})
+	if err != nil {
+		t.Fatalf("Cannot create Service: %v", err)
+	}
+
+	pause := imageutils.GetPauseImageName()
+	totalPodCnt := 0
+	for _, nPods := range []int{3, 9, 15} {
+		// Append nPods each iteration.
+		t.Run(fmt.Sprintf("%d-pods", totalPodCnt+nPods), func(t *testing.T) {
+			for i := 0; i < nPods; i++ {
+				p := st.MakePod().Name(fmt.Sprintf("p-%d", totalPodCnt)).Label("service", serviceName).Container(pause).Obj()
+				_, err = cs.CoreV1().Pods(ns).Create(testCtx.Ctx, p, metav1.CreateOptions{})
+				if err != nil {
+					t.Fatalf("Cannot create Pod: %v", err)
+				}
+				totalPodCnt++
+			}
+			var pods []v1.Pod
+			// Wait for all Pods scheduled.
+			err = wait.Poll(pollInterval, wait.ForeverTestTimeout, func() (bool, error) {
+				podList, err := cs.CoreV1().Pods(ns).List(testCtx.Ctx, metav1.ListOptions{})
+				if err != nil {
+					t.Fatalf("Cannot list pods to verify scheduling: %v", err)
+				}
+				for _, p := range podList.Items {
+					if p.Spec.NodeName == "" {
+						return false, nil
+					}
+				}
+				pods = podList.Items
+				return true, nil
+			})
+			// Verify zone spreading.
+			zoneCnts := make(map[string]int)
+			for _, p := range pods {
+				zoneCnts[zoneForNode[p.Spec.NodeName]]++
+			}
+			maxCnt := 0
+			minCnt := len(pods)
+			for _, c := range zoneCnts {
+				if c > maxCnt {
+					maxCnt = c
+				}
+				if c < minCnt {
+					minCnt = c
+				}
+			}
+			if skew := maxCnt - minCnt; skew != 0 {
+				t.Errorf("Zone skew is %d, should be 0", skew)
+			}
+		})
+	}
+}
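Each subtest above waits until every Pod is scheduled, then asserts that the zone skew, the difference between the most and least populated zones, is exactly 0. Zero skew is attainable here because each cumulative Pod count (3, 12, 27) is divisible by the 3 zones of 100 nodes each. A self-contained restatement of that check follows; the helper name zoneSkew is introduced for illustration and is not part of the commit.

package main

import "fmt"

// zoneSkew mirrors the verification step of TestDefaultPodTopologySpreadScore:
// it returns the difference between the most and least populated zones.
// A skew of 0 means the Pods are spread perfectly evenly.
func zoneSkew(zoneCnts map[string]int, totalPods int) int {
	maxCnt := 0
	minCnt := totalPods // mirrors minCnt := len(pods) in the test
	for _, c := range zoneCnts {
		if c > maxCnt {
			maxCnt = c
		}
		if c < minCnt {
			minCnt = c
		}
	}
	return maxCnt - minCnt
}

func main() {
	// 27 Pods over 3 zones, perfectly balanced: skew 0, as the test expects.
	fmt.Println(zoneSkew(map[string]int{"zone-0": 9, "zone-1": 9, "zone-2": 9}, 27))
	// An imbalanced placement (10/9/8) would fail the test with skew 2.
	fmt.Println(zoneSkew(map[string]int{"zone-0": 10, "zone-1": 9, "zone-2": 8}, 27))
}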
