
Commit 5281152 (1 parent: 4bf6cdb)

feature: Add score integration tests for plugins still missing them: TaintToleration plugin

File tree: 1 file changed (+102 −7 lines)


test/integration/scheduler/scoring/priorities_test.go

Lines changed: 102 additions & 7 deletions
@@ -27,6 +27,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/util/intstr"
+	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/wait"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	featuregatetesting "k8s.io/component-base/featuregate/testing"
@@ -39,6 +40,7 @@ import (
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeaffinity"
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources"
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread"
+	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/tainttoleration"
 	st "k8s.io/kubernetes/pkg/scheduler/testing"
 	testutils "k8s.io/kubernetes/test/integration/util"
 	imageutils "k8s.io/kubernetes/test/utils/image"
@@ -71,8 +73,11 @@ const (
 	pollInterval = 100 * time.Millisecond
 )

-// This file tests the scheduler priority functions.
-func initTestSchedulerForPriorityTest(t *testing.T, preScorePluginName, scorePluginName string) *testutils.TestContext {
+// initTestSchedulerForScoringTests initializes the test environment for scheduler scoring tests.
+// It builds a scheduler configuration that enables the specified prescore and score plugins
+// and disables all other plugins.
+// This setup ensures that only the desired plugins are active during the integration test.
+func initTestSchedulerForScoringTests(t *testing.T, preScorePluginName, scorePluginName string) *testutils.TestContext {
 	cc := configv1.KubeSchedulerConfiguration{
 		Profiles: []configv1.KubeSchedulerProfile{{
 			SchedulerName: pointer.String(v1.DefaultSchedulerName),
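
Note on the truncated body: the hunk above ends just inside the KubeSchedulerConfiguration literal. Given the new doc comment, the profile presumably continues by enabling only the named plugins and disabling every other plugin with the "*" wildcard that kube-scheduler component config supports. A sketch of that continuation, assumed rather than copied from the commit (the real helper must also tolerate an empty preScorePluginName, since TestImageLocalityScoring passes ""):

	cc := configv1.KubeSchedulerConfiguration{
		Profiles: []configv1.KubeSchedulerProfile{{
			SchedulerName: pointer.String(v1.DefaultSchedulerName),
			Plugins: &configv1.Plugins{
				PreScore: configv1.PluginSet{
					// Enable only the requested prescore plugin...
					Enabled: []configv1.Plugin{{Name: preScorePluginName}},
					// ...and let "*" disable every other default plugin at this extension point.
					Disabled: []configv1.Plugin{{Name: "*"}},
				},
				Score: configv1.PluginSet{
					Enabled:  []configv1.Plugin{{Name: scorePluginName}},
					Disabled: []configv1.Plugin{{Name: "*"}},
				},
			},
		}},
	}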
@@ -290,7 +295,7 @@ func TestNodeResourcesScoring(t *testing.T) {
 // TestNodeAffinityScoring verifies that scheduler's node affinity priority function
 // works correctly.
 func TestNodeAffinityScoring(t *testing.T) {
-	testCtx := initTestSchedulerForPriorityTest(t, nodeaffinity.Name, nodeaffinity.Name)
+	testCtx := initTestSchedulerForScoringTests(t, nodeaffinity.Name, nodeaffinity.Name)
 	// Add a few nodes.
 	_, err := createAndWaitForNodesInCache(testCtx, "testnode", st.MakeNode(), 4)
 	if err != nil {
@@ -726,7 +731,7 @@ func TestPodAffinityScoring(t *testing.T) {
 		t.Run(tt.name, func(t *testing.T) {
 			featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.MatchLabelKeysInPodAffinity, tt.enableMatchLabelKeysInAffinity)

-			testCtx := initTestSchedulerForPriorityTest(t, interpodaffinity.Name, interpodaffinity.Name)
+			testCtx := initTestSchedulerForScoringTests(t, interpodaffinity.Name, interpodaffinity.Name)
 			if err := createNamespacesWithLabels(testCtx.ClientSet, []string{"ns1", "ns2"}, map[string]string{"team": "team1"}); err != nil {
 				t.Fatal(err)
 			}
@@ -756,10 +761,100 @@ func TestPodAffinityScoring(t *testing.T) {
 	}
 }

+func TestTaintTolerationScoring(t *testing.T) {
+	tests := []struct {
+		name           string
+		podTolerations []v1.Toleration
+		nodes          []*v1.Node
+		// expectedNodesName is the set of nodes that the pod may be scheduled on.
+		// It is used to verify that the pod is scheduled on one of the expected nodes.
+		expectedNodesName sets.Set[string]
+	}{
+		{
+			name:           "no taints or tolerations",
+			podTolerations: []v1.Toleration{},
+			nodes: []*v1.Node{
+				st.MakeNode().Name("node-1").Obj(),
+				st.MakeNode().Name("node-2").Obj(),
+			},
+			expectedNodesName: sets.New("node-1", "node-2"),
+		},
+		{
+			name: "pod with toleration for node's taint",
+			podTolerations: []v1.Toleration{
+				{
+					Key:      "example-key",
+					Operator: v1.TolerationOpEqual,
+					Value:    "example-value",
+					Effect:   v1.TaintEffectPreferNoSchedule,
+				},
+			},
+			nodes: []*v1.Node{
+				st.MakeNode().Name("node-1").
+					Taints([]v1.Taint{
+						{
+							Key:    "example-key",
+							Value:  "example-value",
+							Effect: v1.TaintEffectPreferNoSchedule,
+						},
+					}).Obj(),
+				st.MakeNode().Name("node-2").Obj(),
+			},
+			expectedNodesName: sets.New("node-1", "node-2"),
+		},
+		{
+			name: "pod without toleration for node's taint",
+			podTolerations: []v1.Toleration{
+				{
+					Key:      "other-key",
+					Operator: v1.TolerationOpEqual,
+					Value:    "other-value",
+					Effect:   v1.TaintEffectPreferNoSchedule,
+				},
+			},
+			nodes: []*v1.Node{
+				st.MakeNode().Name("node-1").
+					Taints([]v1.Taint{
+						{
+							Key:    "example-key",
+							Value:  "example-value",
+							Effect: v1.TaintEffectPreferNoSchedule,
+						},
+					}).Obj(),
+				st.MakeNode().Name("node-2").Obj(),
+			},
+			expectedNodesName: sets.New("node-2"),
+		},
+	}
+	for i, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			testCtx := initTestSchedulerForScoringTests(t, tainttoleration.Name, tainttoleration.Name)
+
+			for _, n := range tt.nodes {
+				if _, err := createNode(testCtx.ClientSet, n); err != nil {
+					t.Fatalf("Failed to create node: %v", err)
+				}
+			}
+			pod, err := runPausePod(testCtx.ClientSet, initPausePod(&testutils.PausePodConfig{
+				Name:        fmt.Sprintf("test-pod-%v", i),
+				Namespace:   testCtx.NS.Name,
+				Tolerations: tt.podTolerations,
+			}))
+			if err != nil {
+				t.Fatalf("Error running pause pod: %v", err)
+			}
+			defer testutils.CleanupPods(testCtx.Ctx, testCtx.ClientSet, t, []*v1.Pod{pod})
+			if !tt.expectedNodesName.Has(pod.Spec.NodeName) {
+				t.Errorf("Pod was not scheduled to expected node: %v", pod.Spec.NodeName)
+			}
+		})
+	}
+}
+
 // TestImageLocalityScoring verifies that the scheduler's image locality priority function
 // works correctly, i.e., the pod gets scheduled to the node where its container images are ready.
 func TestImageLocalityScoring(t *testing.T) {
-	testCtx := initTestSchedulerForPriorityTest(t, "", imagelocality.Name)
+	testCtx := initTestSchedulerForScoringTests(t, "", imagelocality.Name)

 	// Create a node with the large image.
 	// We use a fake large image as the test image used by the pod, which has
@@ -991,7 +1086,7 @@ func TestPodTopologySpreadScoring(t *testing.T) {
 			featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.NodeInclusionPolicyInPodTopologySpread, tt.enableNodeInclusionPolicy)
 			featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.MatchLabelKeysInPodTopologySpread, tt.enableMatchLabelKeys)

-			testCtx := initTestSchedulerForPriorityTest(t, podtopologyspread.Name, podtopologyspread.Name)
+			testCtx := initTestSchedulerForScoringTests(t, podtopologyspread.Name, podtopologyspread.Name)
 			cs := testCtx.ClientSet
 			ns := testCtx.NS.Name
@@ -1044,7 +1139,7 @@ func TestPodTopologySpreadScoring(t *testing.T) {
 // with the system default spreading spreads Pods belonging to a Service.
 // The setup has 300 nodes over 3 zones.
 func TestDefaultPodTopologySpreadScoring(t *testing.T) {
-	testCtx := initTestSchedulerForPriorityTest(t, podtopologyspread.Name, podtopologyspread.Name)
+	testCtx := initTestSchedulerForScoringTests(t, podtopologyspread.Name, podtopologyspread.Name)
 	cs := testCtx.ClientSet
 	ns := testCtx.NS.Name

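Why the third case expects only node-2: TaintToleration's Score phase is driven by soft (PreferNoSchedule) taints; hard NoSchedule/NoExecute taints are handled earlier, at filtering. As a rough standalone model of that behavior (a sketch, not the plugin's actual implementation; Toleration.ToleratesTaint from k8s.io/api/core/v1 is the only real API used), a node is penalized once per PreferNoSchedule taint the pod does not tolerate:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// countIntolerable counts the PreferNoSchedule taints on a node that none of
// the pod's tolerations cover. Fewer untolerated soft taints means a higher
// score in the real plugin.
func countIntolerable(taints []v1.Taint, tolerations []v1.Toleration) int {
	count := 0
	for _, taint := range taints {
		if taint.Effect != v1.TaintEffectPreferNoSchedule {
			continue // hard taints are a filtering concern, not a scoring one
		}
		tolerated := false
		for _, tol := range tolerations {
			if tol.ToleratesTaint(&taint) {
				tolerated = true
				break
			}
		}
		if !tolerated {
			count++
		}
	}
	return count
}

func main() {
	nodeTaints := []v1.Taint{{
		Key: "example-key", Value: "example-value",
		Effect: v1.TaintEffectPreferNoSchedule,
	}}
	matching := []v1.Toleration{{
		Key: "example-key", Operator: v1.TolerationOpEqual,
		Value: "example-value", Effect: v1.TaintEffectPreferNoSchedule,
	}}
	mismatched := []v1.Toleration{{
		Key: "other-key", Operator: v1.TolerationOpEqual,
		Value: "other-value", Effect: v1.TaintEffectPreferNoSchedule,
	}}

	fmt.Println(countIntolerable(nodeTaints, matching))   // 0: node-1 ties with node-2
	fmt.Println(countIntolerable(nodeTaints, mismatched)) // 1: node-1 is penalized, node-2 wins
}

In the first two table cases both nodes end up with the same penalty (zero), so either placement is acceptable; in the third, node-1 carries one untolerated soft taint, which is why expectedNodesName shrinks to node-2.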
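To exercise just the new test, the usual Kubernetes integration-test entry point should work, assuming an etcd binary is available (for example via hack/install-etcd.sh): make test-integration WHAT=./test/integration/scheduler/scoring KUBE_TEST_ARGS="-run ^TestTaintTolerationScoring$". Exact flags can differ across releases, so treat this invocation as a starting point rather than a guarantee.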