@@ -67,6 +67,7 @@ type pausePodConfig struct {
	OwnerReferences            []metav1.OwnerReference
	PriorityClassName          string
	DeletionGracePeriodSeconds *int64
+	TopologySpreadConstraints  []v1.TopologySpreadConstraint
}

var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
@@ -604,6 +605,84 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
		ginkgo.By(fmt.Sprintf("Trying to create another pod(pod5) with hostport %v but hostIP 127.0.0.1 on the node which pod4 resides and expect not scheduled", port))
		createHostPortPodOnNode(f, "pod5", ns, "127.0.0.1", port, v1.ProtocolTCP, nodeSelector, false)
	})
+
+	ginkgo.Context("PodTopologySpread Filtering", func() {
+		var nodeNames []string
+		topologyKey := "kubernetes.io/e2e-pts-filter"
+
+		ginkgo.BeforeEach(func() {
+			ginkgo.By("Trying to get 2 available nodes which can run pod")
+			nodeNames = Get2NodesThatCanRunPod(f)
+			ginkgo.By(fmt.Sprintf("Apply dedicated topologyKey %v for this test on the 2 nodes.", topologyKey))
+			for _, nodeName := range nodeNames {
+				framework.AddOrUpdateLabelOnNode(cs, nodeName, topologyKey, nodeName)
+			}
+		})
+		ginkgo.AfterEach(func() {
+			for _, nodeName := range nodeNames {
+				framework.RemoveLabelOffNode(cs, nodeName, topologyKey)
+			}
+		})
+
+		ginkgo.It("validates 4 pods with MaxSkew=1 are evenly distributed into 2 nodes", func() {
+			podLabel := "e2e-pts-filter"
+			replicas := 4
+			rsConfig := pauseRSConfig{
+				Replicas: int32(replicas),
+				PodConfig: pausePodConfig{
+					Name:      podLabel,
+					Namespace: ns,
+					Labels:    map[string]string{podLabel: ""},
+					Affinity: &v1.Affinity{
+						NodeAffinity: &v1.NodeAffinity{
+							RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
+								NodeSelectorTerms: []v1.NodeSelectorTerm{
+									{
+										MatchExpressions: []v1.NodeSelectorRequirement{
+											{
+												Key:      topologyKey,
+												Operator: v1.NodeSelectorOpIn,
+												Values:   nodeNames,
+											},
+										},
+									},
+								},
+							},
+						},
+					},
+					TopologySpreadConstraints: []v1.TopologySpreadConstraint{
+						{
+							MaxSkew:           1,
+							TopologyKey:       topologyKey,
+							WhenUnsatisfiable: v1.DoNotSchedule,
+							LabelSelector: &metav1.LabelSelector{
+								MatchExpressions: []metav1.LabelSelectorRequirement{
+									{
+										Key:      podLabel,
+										Operator: metav1.LabelSelectorOpExists,
+									},
+								},
+							},
+						},
+					},
+				},
+			}
+			runPauseRS(f, rsConfig)
+			podList, err := cs.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{})
+			framework.ExpectNoError(err)
+			numInNode1, numInNode2 := 0, 0
+			for _, pod := range podList.Items {
+				if pod.Spec.NodeName == nodeNames[0] {
+					numInNode1++
+				} else if pod.Spec.NodeName == nodeNames[1] {
+					numInNode2++
+				}
+			}
+			expected := replicas / len(nodeNames)
+			framework.ExpectEqual(numInNode1, expected, fmt.Sprintf("Pods are not distributed as expected on node %q", nodeNames[0]))
+			framework.ExpectEqual(numInNode2, expected, fmt.Sprintf("Pods are not distributed as expected on node %q", nodeNames[1]))
+		})
+	})
})

// printAllKubeletPods outputs status of all kubelet pods into log.
@@ -633,8 +712,9 @@ func initPausePod(f *framework.Framework, conf pausePodConfig) *v1.Pod {
			OwnerReferences: conf.OwnerReferences,
		},
		Spec: v1.PodSpec{
-			NodeSelector: conf.NodeSelector,
-			Affinity:     conf.Affinity,
+			NodeSelector:              conf.NodeSelector,
+			Affinity:                  conf.Affinity,
+			TopologySpreadConstraints: conf.TopologySpreadConstraints,
			Containers: []v1.Container{
				{
					Name: conf.Name,
@@ -669,7 +749,7 @@ func createPausePod(f *framework.Framework, conf pausePodConfig) *v1.Pod {

func runPausePod(f *framework.Framework, conf pausePodConfig) *v1.Pod {
	pod := createPausePod(f, conf)
-	framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(f.ClientSet, pod))
+	framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod.Name, pod.Namespace, framework.PollShortTimeout))
	pod, err := f.ClientSet.CoreV1().Pods(pod.Namespace).Get(context.TODO(), conf.Name, metav1.GetOptions{})
	framework.ExpectNoError(err)
	return pod
@@ -750,6 +830,30 @@ func GetNodeThatCanRunPod(f *framework.Framework) string {
	return runPodAndGetNodeName(f, pausePodConfig{Name: "without-label"})
}

+// Get2NodesThatCanRunPod returns a slice of two nodes that can each run a pod.
+func Get2NodesThatCanRunPod(f *framework.Framework) []string {
+	firstNode := GetNodeThatCanRunPod(f)
+	ginkgo.By("Trying to launch a pod without a label to get a node which can launch it.")
+	pod := pausePodConfig{
+		Name: "without-label",
+		Affinity: &v1.Affinity{
+			NodeAffinity: &v1.NodeAffinity{
+				RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
+					NodeSelectorTerms: []v1.NodeSelectorTerm{
+						{
+							MatchFields: []v1.NodeSelectorRequirement{
+								{Key: "metadata.name", Operator: v1.NodeSelectorOpNotIn, Values: []string{firstNode}},
+							},
+						},
+					},
+				},
+			},
+		},
+	}
+	secondNode := runPodAndGetNodeName(f, pod)
+	return []string{firstNode, secondNode}
+}
+
func getNodeThatCanRunPodWithoutToleration(f *framework.Framework) string {
	ginkgo.By("Trying to launch a pod without a toleration to get a node which can launch it.")
	return runPodAndGetNodeName(f, pausePodConfig{Name: "without-toleration"})
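
// For reference only, not part of the patch: a minimal standalone sketch of the
// pod-level topologySpreadConstraints shape that the new pausePodConfig field
// ultimately feeds into initPausePod. It assumes only k8s.io/api and
// k8s.io/apimachinery are on the module path; the pod name, label key, topology
// key, and pause image below are illustrative stand-ins, and the program simply
// prints the constraint as JSON so the spread rule can be inspected.
package main

import (
	"encoding/json"
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	podLabel := "e2e-pts-filter" // hypothetical label key, mirroring the test above
	pod := v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:   "pts-example",
			Labels: map[string]string{podLabel: ""},
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{Name: "pause", Image: "k8s.gcr.io/pause:3.2"},
			},
			// Spread pods that carry the podLabel key evenly (MaxSkew=1) across the
			// values of the topology key; placements that would exceed the skew are
			// rejected because WhenUnsatisfiable is DoNotSchedule.
			TopologySpreadConstraints: []v1.TopologySpreadConstraint{
				{
					MaxSkew:           1,
					TopologyKey:       "kubernetes.io/e2e-pts-filter",
					WhenUnsatisfiable: v1.DoNotSchedule,
					LabelSelector: &metav1.LabelSelector{
						MatchExpressions: []metav1.LabelSelectorRequirement{
							{Key: podLabel, Operator: metav1.LabelSelectorOpExists},
						},
					},
				},
			},
		},
	}
	out, err := json.MarshalIndent(pod.Spec.TopologySpreadConstraints, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}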