@@ -27,6 +27,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/util/intstr"
+	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/wait"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	featuregatetesting "k8s.io/component-base/featuregate/testing"
@@ -39,6 +40,7 @@ import (
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeaffinity"
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources"
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread"
+	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/tainttoleration"
 	st "k8s.io/kubernetes/pkg/scheduler/testing"
 	testutils "k8s.io/kubernetes/test/integration/util"
 	imageutils "k8s.io/kubernetes/test/utils/image"
@@ -71,8 +73,11 @@ const (
 	pollInterval = 100 * time.Millisecond
 )
 
-// This file tests the scheduler priority functions.
-func initTestSchedulerForPriorityTest(t *testing.T, preScorePluginName, scorePluginName string) *testutils.TestContext {
+// initTestSchedulerForScoringTests initializes the test environment for scheduler scoring tests.
+// It builds a scheduler configuration that enables the specified PreScore and Score plugins
+// and disables all other plugins, so that only the plugins under test are active during
+// the integration test.
+func initTestSchedulerForScoringTests(t *testing.T, preScorePluginName, scorePluginName string) *testutils.TestContext {
 	cc := configv1.KubeSchedulerConfiguration{
 		Profiles: []configv1.KubeSchedulerProfile{{
 			SchedulerName: pointer.String(v1.DefaultSchedulerName),
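
Note: the helper above works by building a single-profile scheduler configuration that disables every PreScore and Score plugin (the KubeSchedulerConfiguration "*" wildcard) and re-enables only the named ones. The sketch below illustrates that pattern; it is an assumption about the shape, not the helper's exact body. The package name, the scoringProfile function name, and the pointer import path are placeholders chosen for this note, while configv1, v1, and pointer match the identifiers visible in the diff.

package scoring

import (
	v1 "k8s.io/api/core/v1"
	configv1 "k8s.io/kube-scheduler/config/v1"
	"k8s.io/utils/pointer"
)

// scoringProfile sketches a profile that disables all PreScore/Score plugins
// via the "*" wildcard and re-enables only the plugins under test.
func scoringProfile(preScorePluginName, scorePluginName string) configv1.KubeSchedulerConfiguration {
	// An empty preScorePluginName (as TestImageLocalityScoring passes) leaves
	// the PreScore enabled list empty, so no PreScore plugin runs.
	preScoreEnabled := []configv1.Plugin{}
	if preScorePluginName != "" {
		preScoreEnabled = append(preScoreEnabled, configv1.Plugin{Name: preScorePluginName})
	}
	return configv1.KubeSchedulerConfiguration{
		Profiles: []configv1.KubeSchedulerProfile{{
			SchedulerName: pointer.String(v1.DefaultSchedulerName),
			Plugins: &configv1.Plugins{
				PreScore: configv1.PluginSet{
					Enabled:  preScoreEnabled,
					Disabled: []configv1.Plugin{{Name: "*"}},
				},
				Score: configv1.PluginSet{
					Enabled:  []configv1.Plugin{{Name: scorePluginName}},
					Disabled: []configv1.Plugin{{Name: "*"}},
				},
			},
		}},
	}
}
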
@@ -290,7 +295,7 @@ func TestNodeResourcesScoring(t *testing.T) {
 // TestNodeAffinityScoring verifies that scheduler's node affinity priority function
 // works correctly.
 func TestNodeAffinityScoring(t *testing.T) {
-	testCtx := initTestSchedulerForPriorityTest(t, nodeaffinity.Name, nodeaffinity.Name)
+	testCtx := initTestSchedulerForScoringTests(t, nodeaffinity.Name, nodeaffinity.Name)
 	// Add a few nodes.
 	_, err := createAndWaitForNodesInCache(testCtx, "testnode", st.MakeNode(), 4)
 	if err != nil {
@@ -726,7 +731,7 @@ func TestPodAffinityScoring(t *testing.T) {
 		t.Run(tt.name, func(t *testing.T) {
 			featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.MatchLabelKeysInPodAffinity, tt.enableMatchLabelKeysInAffinity)
 
-			testCtx := initTestSchedulerForPriorityTest(t, interpodaffinity.Name, interpodaffinity.Name)
+			testCtx := initTestSchedulerForScoringTests(t, interpodaffinity.Name, interpodaffinity.Name)
 			if err := createNamespacesWithLabels(testCtx.ClientSet, []string{"ns1", "ns2"}, map[string]string{"team": "team1"}); err != nil {
 				t.Fatal(err)
 			}
@@ -756,10 +761,100 @@ func TestPodAffinityScoring(t *testing.T) {
 	}
 }
 
+func TestTaintTolerationScoring(t *testing.T) {
+	tests := []struct {
+		name           string
+		podTolerations []v1.Toleration
+		nodes          []*v1.Node
+		// expectedNodesName is the set of nodes that the pod could be scheduled on.
+		// It is used to verify that the pod lands on one of the expected nodes.
+		expectedNodesName sets.Set[string]
+	}{
+		{
+			name:           "no taints or tolerations",
+			podTolerations: []v1.Toleration{},
+			nodes: []*v1.Node{
+				st.MakeNode().Name("node-1").Obj(),
+				st.MakeNode().Name("node-2").Obj(),
+			},
+			expectedNodesName: sets.New("node-1", "node-2"),
+		},
+		{
+			name: "pod with toleration for node's taint",
+			podTolerations: []v1.Toleration{
+				{
+					Key:      "example-key",
+					Operator: v1.TolerationOpEqual,
+					Value:    "example-value",
+					Effect:   v1.TaintEffectPreferNoSchedule,
+				},
+			},
+			nodes: []*v1.Node{
+				st.MakeNode().Name("node-1").
+					Taints([]v1.Taint{
+						{
+							Key:    "example-key",
+							Value:  "example-value",
+							Effect: v1.TaintEffectPreferNoSchedule,
+						},
+					}).Obj(),
+				st.MakeNode().Name("node-2").Obj(),
+			},
+			expectedNodesName: sets.New("node-1", "node-2"),
+		},
+		{
+			name: "pod without toleration for node's taint",
+			podTolerations: []v1.Toleration{
+				{
+					Key:      "other-key",
+					Operator: v1.TolerationOpEqual,
+					Value:    "other-value",
+					Effect:   v1.TaintEffectPreferNoSchedule,
+				},
+			},
+			nodes: []*v1.Node{
+				st.MakeNode().Name("node-1").
+					Taints([]v1.Taint{
+						{
+							Key:    "example-key",
+							Value:  "example-value",
+							Effect: v1.TaintEffectPreferNoSchedule,
+						},
+					}).Obj(),
+				st.MakeNode().Name("node-2").Obj(),
+			},
+			expectedNodesName: sets.New("node-2"),
+		},
+	}
+	for i, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			testCtx := initTestSchedulerForScoringTests(t, tainttoleration.Name, tainttoleration.Name)
+
+			for _, n := range tt.nodes {
+				if _, err := createNode(testCtx.ClientSet, n); err != nil {
+					t.Fatalf("Failed to create node: %v", err)
+				}
+			}
+			pod, err := runPausePod(testCtx.ClientSet, initPausePod(&testutils.PausePodConfig{
+				Name:        fmt.Sprintf("test-pod-%v", i),
+				Namespace:   testCtx.NS.Name,
+				Tolerations: tt.podTolerations,
+			}))
+			if err != nil {
+				t.Fatalf("Error running pause pod: %v", err)
+			}
+			defer testutils.CleanupPods(testCtx.Ctx, testCtx.ClientSet, t, []*v1.Pod{pod})
+			if !tt.expectedNodesName.Has(pod.Spec.NodeName) {
+				t.Errorf("Pod was not scheduled to expected node: %v", pod.Spec.NodeName)
+			}
+		})
+	}
+}
+
 // TestImageLocalityScoring verifies that the scheduler's image locality priority function
 // works correctly, i.e., the pod gets scheduled to the node where its container images are ready.
 func TestImageLocalityScoring(t *testing.T) {
-	testCtx := initTestSchedulerForPriorityTest(t, "", imagelocality.Name)
+	testCtx := initTestSchedulerForScoringTests(t, "", imagelocality.Name)
 
 	// Create a node with the large image.
 	// We use a fake large image as the test image used by the pod, which has
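
Note: the expected-node sets in the new TestTaintTolerationScoring follow from how the TaintToleration score plugin treats PreferNoSchedule taints: a node carrying PreferNoSchedule taints the pod does not tolerate scores lower, so only in the third case is node-2 the clear preference. The standalone sketch below restates that idea for readers; it is a simplification, not the plugin's actual implementation, and intolerablePreferNoSchedule is a made-up helper name.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// intolerablePreferNoSchedule counts the node's PreferNoSchedule taints that the
// pod does not tolerate; the fewer such taints, the more attractive the node is
// to the TaintToleration score plugin, which is what the test exercises end to end.
func intolerablePreferNoSchedule(tolerations []v1.Toleration, taints []v1.Taint) int {
	count := 0
	for _, taint := range taints {
		if taint.Effect != v1.TaintEffectPreferNoSchedule {
			continue
		}
		tolerated := false
		for i := range tolerations {
			if tolerations[i].ToleratesTaint(&taint) {
				tolerated = true
				break
			}
		}
		if !tolerated {
			count++
		}
	}
	return count
}

func main() {
	taint := v1.Taint{Key: "example-key", Value: "example-value", Effect: v1.TaintEffectPreferNoSchedule}
	match := v1.Toleration{Key: "example-key", Operator: v1.TolerationOpEqual, Value: "example-value", Effect: v1.TaintEffectPreferNoSchedule}
	other := v1.Toleration{Key: "other-key", Operator: v1.TolerationOpEqual, Value: "other-value", Effect: v1.TaintEffectPreferNoSchedule}

	fmt.Println(intolerablePreferNoSchedule([]v1.Toleration{match}, []v1.Taint{taint})) // 0: taint tolerated, node-1 stays attractive
	fmt.Println(intolerablePreferNoSchedule([]v1.Toleration{other}, []v1.Taint{taint})) // 1: taint not tolerated, node-2 is preferred
}
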
@@ -991,7 +1086,7 @@ func TestPodTopologySpreadScoring(t *testing.T) {
 			featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.NodeInclusionPolicyInPodTopologySpread, tt.enableNodeInclusionPolicy)
 			featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.MatchLabelKeysInPodTopologySpread, tt.enableMatchLabelKeys)
 
-			testCtx := initTestSchedulerForPriorityTest(t, podtopologyspread.Name, podtopologyspread.Name)
+			testCtx := initTestSchedulerForScoringTests(t, podtopologyspread.Name, podtopologyspread.Name)
 			cs := testCtx.ClientSet
 			ns := testCtx.NS.Name
 
@@ -1044,7 +1139,7 @@ func TestPodTopologySpreadScoring(t *testing.T) {
 // with the system default spreading spreads Pods belonging to a Service.
 // The setup has 300 nodes over 3 zones.
 func TestDefaultPodTopologySpreadScoring(t *testing.T) {
-	testCtx := initTestSchedulerForPriorityTest(t, podtopologyspread.Name, podtopologyspread.Name)
+	testCtx := initTestSchedulerForScoringTests(t, podtopologyspread.Name, podtopologyspread.Name)
 	cs := testCtx.ClientSet
 	ns := testCtx.NS.Name
 