Skip to content

Commit caab8b7

Browse files
committed
EvenPodsSpread: integration test
1 parent 270d952 commit caab8b7

File tree

7 files changed

+355
-16
lines changed

7 files changed

+355
-16
lines changed

pkg/scheduler/testing/wrappers.go

Lines changed: 25 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -17,10 +17,14 @@ limitations under the License.
1717
package testing
1818

1919
import (
20+
"fmt"
21+
2022
"k8s.io/api/core/v1"
2123
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
2224
)
2325

26+
var zero int64
27+
2428
// NodeSelectorWrapper wraps a NodeSelector inside.
2529
type NodeSelectorWrapper struct{ v1.NodeSelector }
2630

@@ -152,6 +156,27 @@ func (p *PodWrapper) Namespace(s string) *PodWrapper {
152156
return p
153157
}
154158

159+
// Container appends a container into PodSpec of the inner pod.
160+
func (p *PodWrapper) Container(s string) *PodWrapper {
161+
p.Spec.Containers = append(p.Spec.Containers, v1.Container{
162+
Name: fmt.Sprintf("con%d", len(p.Spec.Containers)),
163+
Image: s,
164+
})
165+
return p
166+
}
167+
168+
// Priority sets a priority value into PodSpec of the inner pod.
169+
func (p *PodWrapper) Priority(val int32) *PodWrapper {
170+
p.Spec.Priority = &val
171+
return p
172+
}
173+
174+
// ZeroTerminationGracePeriod sets the TerminationGracePeriodSeconds of the inner pod to zero.
175+
func (p *PodWrapper) ZeroTerminationGracePeriod() *PodWrapper {
176+
p.Spec.TerminationGracePeriodSeconds = &zero
177+
return p
178+
}
179+
155180
// Node sets `s` as the nodeName of the inner pod.
156181
func (p *PodWrapper) Node(s string) *PodWrapper {
157182
p.Spec.NodeName = s

pkg/scheduler/util/utils.go

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,6 @@ package util
1818

1919
import (
2020
"sort"
21-
2221
"time"
2322

2423
"k8s.io/api/core/v1"

test/integration/scheduler/BUILD

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -37,6 +37,7 @@ go_test(
3737
"//pkg/scheduler/factory:go_default_library",
3838
"//pkg/scheduler/framework/v1alpha1:go_default_library",
3939
"//pkg/scheduler/nodeinfo:go_default_library",
40+
"//pkg/scheduler/testing:go_default_library",
4041
"//pkg/volume:go_default_library",
4142
"//pkg/volume/testing:go_default_library",
4243
"//plugin/pkg/admission/podtolerationrestriction:go_default_library",

test/integration/scheduler/predicates_test.go

Lines changed: 168 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -17,13 +17,19 @@ limitations under the License.
1717
package scheduler
1818

1919
import (
20+
"fmt"
2021
"testing"
2122
"time"
2223

2324
"k8s.io/api/core/v1"
2425
"k8s.io/apimachinery/pkg/api/errors"
2526
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
2627
"k8s.io/apimachinery/pkg/util/wait"
28+
utilfeature "k8s.io/apiserver/pkg/util/feature"
29+
featuregatetesting "k8s.io/component-base/featuregate/testing"
30+
"k8s.io/kubernetes/pkg/features"
31+
"k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
32+
st "k8s.io/kubernetes/pkg/scheduler/testing"
2733
testutils "k8s.io/kubernetes/test/utils"
2834
imageutils "k8s.io/kubernetes/test/utils/image"
2935
)
@@ -920,3 +926,165 @@ func TestNodePIDPressure(t *testing.T) {
920926

921927
cleanupPods(cs, t, []*v1.Pod{testPod})
922928
}
929+
930+
// TestEvenPodsSpreadPredicate verifies that the EvenPodsSpread predicate works as expected.
931+
func TestEvenPodsSpreadPredicate(t *testing.T) {
932+
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.EvenPodsSpread, true)()
933+
// Apply feature gates to enable EvenPodsSpread
934+
defer algorithmprovider.ApplyFeatureGates()()
935+
936+
context := initTest(t, "eps-predicate")
937+
cs := context.clientSet
938+
ns := context.ns.Name
939+
defer cleanupTest(t, context)
940+
// Add 4 nodes.
941+
nodes, err := createNodes(cs, "node", nil, 4)
942+
if err != nil {
943+
t.Fatalf("Cannot create nodes: %v", err)
944+
}
945+
for i, node := range nodes {
946+
// Apply labels "zone: zone-{0,1}" and "node: <node name>" to each node.
947+
labels := map[string]string{
948+
"zone": fmt.Sprintf("zone-%d", i/2),
949+
"node": node.Name,
950+
}
951+
if err = testutils.AddLabelsToNode(cs, node.Name, labels); err != nil {
952+
t.Fatalf("Cannot add labels to node: %v", err)
953+
}
954+
if err = waitForNodeLabels(cs, node.Name, labels); err != nil {
955+
t.Fatalf("Failed to poll node labels: %v", err)
956+
}
957+
}
958+
959+
pause := imageutils.GetPauseImageName()
960+
tests := []struct {
961+
name string
962+
incomingPod *v1.Pod
963+
existingPods []*v1.Pod
964+
fits bool
965+
candidateNodes []string // nodes the incoming pod is expected to be scheduled onto
966+
}{
967+
// note: naming starts at index 0
968+
{
969+
name: "place pod on a 1/1/0/1 cluster with MaxSkew=1, node-2 is the only fit",
970+
incomingPod: st.MakePod().Namespace(ns).Name("p").Label("foo", "").Container(pause).
971+
SpreadConstraint(1, "node", hardSpread, st.MakeLabelSelector().Exists("foo").Obj()).
972+
Obj(),
973+
existingPods: []*v1.Pod{
974+
st.MakePod().Namespace(ns).Name("p0").Node("node-0").Label("foo", "").Container(pause).Obj(),
975+
st.MakePod().Namespace(ns).Name("p1").Node("node-1").Label("foo", "").Container(pause).Obj(),
976+
st.MakePod().Namespace(ns).Name("p3").Node("node-3").Label("foo", "").Container(pause).Obj(),
977+
},
978+
fits: true,
979+
candidateNodes: []string{"node-2"},
980+
},
981+
{
982+
name: "place pod on a 2/0/0/1 cluster with MaxSkew=2, node-{1,2,3} are good fits",
983+
incomingPod: st.MakePod().Namespace(ns).Name("p").Label("foo", "").Container(pause).
984+
SpreadConstraint(2, "node", hardSpread, st.MakeLabelSelector().Exists("foo").Obj()).
985+
Obj(),
986+
existingPods: []*v1.Pod{
987+
st.MakePod().Namespace(ns).Name("p0a").Node("node-0").Label("foo", "").Container(pause).Obj(),
988+
st.MakePod().Namespace(ns).Name("p0b").Node("node-0").Label("foo", "").Container(pause).Obj(),
989+
st.MakePod().Namespace(ns).Name("p3").Node("node-3").Label("foo", "").Container(pause).Obj(),
990+
},
991+
fits: true,
992+
candidateNodes: []string{"node-1", "node-2", "node-3"},
993+
},
994+
{
995+
name: "pod is required to be placed on zone0, so only node-1 fits",
996+
incomingPod: st.MakePod().Namespace(ns).Name("p").Label("foo", "").Container(pause).
997+
NodeAffinityIn("zone", []string{"zone-0"}).
998+
SpreadConstraint(1, "node", hardSpread, st.MakeLabelSelector().Exists("foo").Obj()).
999+
Obj(),
1000+
existingPods: []*v1.Pod{
1001+
st.MakePod().Namespace(ns).Name("p0").Node("node-0").Label("foo", "").Container(pause).Obj(),
1002+
st.MakePod().Namespace(ns).Name("p3").Node("node-3").Label("foo", "").Container(pause).Obj(),
1003+
},
1004+
fits: true,
1005+
candidateNodes: []string{"node-1"},
1006+
},
1007+
{
1008+
name: "two constraints: pod can only be placed to zone-1/node-2",
1009+
incomingPod: st.MakePod().Namespace(ns).Name("p").Label("foo", "").Container(pause).
1010+
SpreadConstraint(1, "zone", hardSpread, st.MakeLabelSelector().Exists("foo").Obj()).
1011+
SpreadConstraint(1, "node", hardSpread, st.MakeLabelSelector().Exists("foo").Obj()).
1012+
Obj(),
1013+
existingPods: []*v1.Pod{
1014+
st.MakePod().Namespace(ns).Name("p0").Node("node-0").Label("foo", "").Container(pause).Obj(),
1015+
st.MakePod().Namespace(ns).Name("p1").Node("node-1").Label("foo", "").Container(pause).Obj(),
1016+
st.MakePod().Namespace(ns).Name("p3a").Node("node-3").Label("foo", "").Container(pause).Obj(),
1017+
st.MakePod().Namespace(ns).Name("p3b").Node("node-3").Label("foo", "").Container(pause).Obj(),
1018+
},
1019+
fits: true,
1020+
candidateNodes: []string{"node-2"},
1021+
},
1022+
{
1023+
name: "pod cannot be placed onto any node",
1024+
incomingPod: st.MakePod().Namespace(ns).Name("p").Label("foo", "").Container(pause).
1025+
NodeAffinityNotIn("node", []string{"node-0"}). // mock a 3-node cluster
1026+
SpreadConstraint(1, "zone", hardSpread, st.MakeLabelSelector().Exists("foo").Obj()).
1027+
SpreadConstraint(1, "node", hardSpread, st.MakeLabelSelector().Exists("foo").Obj()).
1028+
Obj(),
1029+
existingPods: []*v1.Pod{
1030+
st.MakePod().Namespace(ns).Name("p1a").Node("node-1").Label("foo", "").Container(pause).Obj(),
1031+
st.MakePod().Namespace(ns).Name("p1b").Node("node-1").Label("foo", "").Container(pause).Obj(),
1032+
st.MakePod().Namespace(ns).Name("p2a").Node("node-2").Label("foo", "").Container(pause).Obj(),
1033+
st.MakePod().Namespace(ns).Name("p2b").Node("node-2").Label("foo", "").Container(pause).Obj(),
1034+
st.MakePod().Namespace(ns).Name("p3").Node("node-3").Label("foo", "").Container(pause).Obj(),
1035+
},
1036+
fits: false,
1037+
},
1038+
{
1039+
name: "high priority pod can preempt others",
1040+
incomingPod: st.MakePod().Namespace(ns).Name("p").Label("foo", "").Container(pause).Priority(100).
1041+
NodeAffinityNotIn("node", []string{"node-0"}). // mock a 3-node cluster
1042+
SpreadConstraint(1, "zone", hardSpread, st.MakeLabelSelector().Exists("foo").Obj()).
1043+
SpreadConstraint(1, "node", hardSpread, st.MakeLabelSelector().Exists("foo").Obj()).
1044+
Obj(),
1045+
existingPods: []*v1.Pod{
1046+
st.MakePod().ZeroTerminationGracePeriod().Namespace(ns).Name("p1a").Node("node-1").Label("foo", "").Container(pause).Obj(),
1047+
st.MakePod().ZeroTerminationGracePeriod().Namespace(ns).Name("p1b").Node("node-1").Label("foo", "").Container(pause).Obj(),
1048+
st.MakePod().ZeroTerminationGracePeriod().Namespace(ns).Name("p2a").Node("node-2").Label("foo", "").Container(pause).Obj(),
1049+
st.MakePod().ZeroTerminationGracePeriod().Namespace(ns).Name("p2b").Node("node-2").Label("foo", "").Container(pause).Obj(),
1050+
st.MakePod().ZeroTerminationGracePeriod().Namespace(ns).Name("p3").Node("node-3").Label("foo", "").Container(pause).Obj(),
1051+
},
1052+
fits: true,
1053+
candidateNodes: []string{"node-1", "node-2", "node-3"},
1054+
},
1055+
}
1056+
for _, tt := range tests {
1057+
t.Run(tt.name, func(t *testing.T) {
1058+
allPods := append(tt.existingPods, tt.incomingPod)
1059+
defer cleanupPods(cs, t, allPods)
1060+
for _, pod := range tt.existingPods {
1061+
createdPod, err := cs.CoreV1().Pods(pod.Namespace).Create(pod)
1062+
if err != nil {
1063+
t.Fatalf("Test Failed: error while creating pod during test: %v", err)
1064+
}
1065+
err = wait.Poll(pollInterval, wait.ForeverTestTimeout, podScheduled(cs, createdPod.Namespace, createdPod.Name))
1066+
if err != nil {
1067+
t.Errorf("Test Failed: error while waiting for pod during test: %v", err)
1068+
}
1069+
}
1070+
testPod, err := cs.CoreV1().Pods(tt.incomingPod.Namespace).Create(tt.incomingPod)
1071+
if err != nil && !errors.IsInvalid(err) {
1072+
t.Fatalf("Test Failed: error while creating pod during test: %v", err)
1073+
}
1074+
1075+
if tt.fits {
1076+
err = wait.Poll(pollInterval, wait.ForeverTestTimeout, podScheduledIn(cs, testPod.Namespace, testPod.Name, tt.candidateNodes))
1077+
} else {
1078+
err = wait.Poll(pollInterval, wait.ForeverTestTimeout, podUnschedulable(cs, testPod.Namespace, testPod.Name))
1079+
}
1080+
if err != nil {
1081+
t.Errorf("Test Failed: %v", err)
1082+
}
1083+
})
1084+
}
1085+
}
1086+
1087+
var (
1088+
hardSpread = v1.DoNotSchedule
1089+
softSpread = v1.ScheduleAnyway
1090+
)

test/integration/scheduler/preemption_test.go

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -273,10 +273,10 @@ func TestPreemption(t *testing.T) {
273273
}
274274
}
275275
}
276-
// Also check that the preemptor pod gets the annotation for nominated node name.
276+
// Also check that the preemptor pod gets the NominatedNodeName field set.
277277
if len(test.preemptedPodIndexes) > 0 {
278278
if err := waitForNominatedNodeName(cs, preemptor); err != nil {
279-
t.Errorf("Test [%v]: NominatedNodeName annotation was not set for pod %v: %v", test.description, preemptor.Name, err)
279+
t.Errorf("Test [%v]: NominatedNodeName field was not set for pod %v: %v", test.description, preemptor.Name, err)
280280
}
281281
}
282282

0 commit comments

Comments (0)