@@ -568,8 +568,12 @@ func runTopologyManagerPositiveTest(f *framework.Framework, numPods int, ctnAttr
568
568
pods = append (pods , pod )
569
569
}
570
570
571
- for podID := 0 ; podID < numPods ; podID ++ {
572
- validatePodAlignment (f , pods [podID ], envInfo )
571
+ // per https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/0035-20190130-topology-manager.md#multi-numa-systems-tests
572
+ // we can do a meaningful validation only when using the single-numa node policy
573
+ if envInfo .policy == topologymanager .PolicySingleNumaNode {
574
+ for podID := 0 ; podID < numPods ; podID ++ {
575
+ validatePodAlignment (f , pods [podID ], envInfo )
576
+ }
573
577
}
574
578
575
579
for podID := 0 ; podID < numPods ; podID ++ {
@@ -703,7 +707,7 @@ func teardownSRIOVConfigOrFail(f *framework.Framework, sd *sriovData) {
703
707
framework .ExpectNoError (err )
704
708
}
705
709
706
- func runTopologyManagerNodeAlignmentSuiteTests (f * framework.Framework , configMap * v1.ConfigMap , reservedSystemCPUs string , numaNodes , coreCount int ) {
710
+ func runTopologyManagerNodeAlignmentSuiteTests (f * framework.Framework , configMap * v1.ConfigMap , reservedSystemCPUs string , numaNodes , coreCount int , policy string ) {
707
711
threadsPerCore := 1
708
712
if isHTEnabled () {
709
713
threadsPerCore = 2
@@ -713,6 +717,7 @@ func runTopologyManagerNodeAlignmentSuiteTests(f *framework.Framework, configMap
713
717
envInfo := & testEnvInfo {
714
718
numaNodes : numaNodes ,
715
719
sriovResourceName : sd .resourceName ,
720
+ policy : policy ,
716
721
}
717
722
718
723
// could have been a loop, we unroll it to explain the testcases
@@ -859,22 +864,24 @@ func runTopologyManagerNodeAlignmentSuiteTests(f *framework.Framework, configMap
859
864
runTopologyManagerPositiveTest (f , 2 , ctnAttrs , envInfo )
860
865
}
861
866
862
- // overflow NUMA node capacity: cores
863
- numCores := 1 + (threadsPerCore * coreCount )
864
- excessCoresReq := fmt .Sprintf ("%dm" , numCores * 1000 )
865
- ginkgo .By (fmt .Sprintf ("Trying to admit a guaranteed pods, with %d cores, 1 %s device - and it should be rejected" , numCores , sd .resourceName ))
866
- ctnAttrs = []tmCtnAttribute {
867
- {
868
- ctnName : "gu-container" ,
869
- cpuRequest : excessCoresReq ,
870
- cpuLimit : excessCoresReq ,
871
- deviceName : sd .resourceName ,
872
- deviceRequest : "1" ,
873
- deviceLimit : "1" ,
874
- },
867
+ // this is the only policy that can guarantee reliable rejects
868
+ if policy == topologymanager .PolicySingleNumaNode {
869
+ // overflow NUMA node capacity: cores
870
+ numCores := 1 + (threadsPerCore * coreCount )
871
+ excessCoresReq := fmt .Sprintf ("%dm" , numCores * 1000 )
872
+ ginkgo .By (fmt .Sprintf ("Trying to admit a guaranteed pods, with %d cores, 1 %s device - and it should be rejected" , numCores , sd .resourceName ))
873
+ ctnAttrs = []tmCtnAttribute {
874
+ {
875
+ ctnName : "gu-container" ,
876
+ cpuRequest : excessCoresReq ,
877
+ cpuLimit : excessCoresReq ,
878
+ deviceName : sd .resourceName ,
879
+ deviceRequest : "1" ,
880
+ deviceLimit : "1" ,
881
+ },
882
+ }
883
+ runTopologyManagerNegativeTest (f , 1 , ctnAttrs , envInfo )
875
884
}
876
- runTopologyManagerNegativeTest (f , 1 , ctnAttrs , envInfo )
877
-
878
885
teardownSRIOVConfigOrFail (f , sd )
879
886
}
880
887
@@ -927,15 +934,18 @@ func runTopologyManagerTests(f *framework.Framework) {
927
934
oldCfg , err = getCurrentKubeletConfig ()
928
935
framework .ExpectNoError (err )
929
936
930
- policy := topologymanager .PolicySingleNumaNode
937
+ var policies = []string {topologymanager .PolicySingleNumaNode , topologymanager .PolicyRestricted ,
938
+ topologymanager .PolicyBestEffort , topologymanager .PolicyNone }
931
939
932
- // Configure Topology Manager
933
- ginkgo .By (fmt .Sprintf ("by configuring Topology Manager policy to %s" , policy ))
934
- framework .Logf ("Configuring topology Manager policy to %s" , policy )
940
+ for _ , policy := range policies {
941
+ // Configure Topology Manager
942
+ ginkgo .By (fmt .Sprintf ("by configuring Topology Manager policy to %s" , policy ))
943
+ framework .Logf ("Configuring topology Manager policy to %s" , policy )
935
944
936
- reservedSystemCPUs := configureTopologyManagerInKubelet (f , oldCfg , policy , configMap , numaNodes )
945
+ reservedSystemCPUs := configureTopologyManagerInKubelet (f , oldCfg , policy , configMap , numaNodes )
937
946
938
- runTopologyManagerNodeAlignmentSuiteTests (f , configMap , reservedSystemCPUs , numaNodes , coreCount )
947
+ runTopologyManagerNodeAlignmentSuiteTests (f , configMap , reservedSystemCPUs , numaNodes , coreCount , policy )
948
+ }
939
949
940
950
// restore kubelet config
941
951
setOldKubeletConfig (f , oldCfg )
0 commit comments