
Commit 64904d0

e2e: topomgr: extend tests to all the policies
Per https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/0035-20190130-topology-manager.md#multi-numa-systems-tests we validate the results only for the single-numa-node policy, because there is no simple and reliable way to validate the allocations performed by the other policies.

Signed-off-by: Francesco Romani <[email protected]>
1 parent a249b93 commit 64904d0
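
The commit drives the existing node-alignment suite through all four Topology Manager policies instead of only single-numa-node. As a reference, the sketch below lists the policy identifiers the new loop iterates over and the kubelet policy names they map to; the identifiers appear in the diff, but the constant declarations here are only an illustrative sketch, not a copy of pkg/kubelet/cm/topologymanager.

package topologymanager

// Illustrative sketch only: the real constants live in
// pkg/kubelet/cm/topologymanager. The string values are the kubelet's
// accepted topologyManagerPolicy settings.
const (
	PolicyNone           = "none"             // no alignment enforced
	PolicyBestEffort     = "best-effort"      // prefer an aligned allocation, admit the pod regardless
	PolicyRestricted     = "restricted"       // reject pods that cannot get a preferred alignment
	PolicySingleNumaNode = "single-numa-node" // require all resources to come from a single NUMA node
)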

File tree: 2 files changed (+35, -24 lines)


test/e2e_node/numa_alignment.go (1 addition, 0 deletions)

@@ -180,6 +180,7 @@ func makeEnvMap(logs string) (map[string]string, error) {
 type testEnvInfo struct {
 	numaNodes         int
 	sriovResourceName string
+	policy            string
 }

 func containerWantsDevices(cnt *v1.Container, envInfo *testEnvInfo) bool {

test/e2e_node/topology_manager_test.go (34 additions, 24 deletions)
@@ -568,8 +568,12 @@ func runTopologyManagerPositiveTest(f *framework.Framework, numPods int, ctnAttr
 		pods = append(pods, pod)
 	}

-	for podID := 0; podID < numPods; podID++ {
-		validatePodAlignment(f, pods[podID], envInfo)
+	// per https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/0035-20190130-topology-manager.md#multi-numa-systems-tests
+	// we can do a meaningful validation only when using the single-numa node policy
+	if envInfo.policy == topologymanager.PolicySingleNumaNode {
+		for podID := 0; podID < numPods; podID++ {
+			validatePodAlignment(f, pods[podID], envInfo)
+		}
 	}

 	for podID := 0; podID < numPods; podID++ {
@@ -703,7 +707,7 @@ func teardownSRIOVConfigOrFail(f *framework.Framework, sd *sriovData) {
 	framework.ExpectNoError(err)
 }

-func runTopologyManagerNodeAlignmentSuiteTests(f *framework.Framework, configMap *v1.ConfigMap, reservedSystemCPUs string, numaNodes, coreCount int) {
+func runTopologyManagerNodeAlignmentSuiteTests(f *framework.Framework, configMap *v1.ConfigMap, reservedSystemCPUs string, numaNodes, coreCount int, policy string) {
 	threadsPerCore := 1
 	if isHTEnabled() {
 		threadsPerCore = 2
@@ -713,6 +717,7 @@ func runTopologyManagerNodeAlignmentSuiteTests(f *framework.Framework, configMap
 	envInfo := &testEnvInfo{
 		numaNodes:         numaNodes,
 		sriovResourceName: sd.resourceName,
+		policy:            policy,
 	}

 	// could have been a loop, we unroll it to explain the testcases
@@ -859,22 +864,24 @@ func runTopologyManagerNodeAlignmentSuiteTests(f *framework.Framework, configMap
 		runTopologyManagerPositiveTest(f, 2, ctnAttrs, envInfo)
 	}

-	// overflow NUMA node capacity: cores
-	numCores := 1 + (threadsPerCore * coreCount)
-	excessCoresReq := fmt.Sprintf("%dm", numCores*1000)
-	ginkgo.By(fmt.Sprintf("Trying to admit a guaranteed pods, with %d cores, 1 %s device - and it should be rejected", numCores, sd.resourceName))
-	ctnAttrs = []tmCtnAttribute{
-		{
-			ctnName:       "gu-container",
-			cpuRequest:    excessCoresReq,
-			cpuLimit:      excessCoresReq,
-			deviceName:    sd.resourceName,
-			deviceRequest: "1",
-			deviceLimit:   "1",
-		},
+	// this is the only policy that can guarantee reliable rejects
+	if policy == topologymanager.PolicySingleNumaNode {
+		// overflow NUMA node capacity: cores
+		numCores := 1 + (threadsPerCore * coreCount)
+		excessCoresReq := fmt.Sprintf("%dm", numCores*1000)
+		ginkgo.By(fmt.Sprintf("Trying to admit a guaranteed pods, with %d cores, 1 %s device - and it should be rejected", numCores, sd.resourceName))
+		ctnAttrs = []tmCtnAttribute{
+			{
+				ctnName:       "gu-container",
+				cpuRequest:    excessCoresReq,
+				cpuLimit:      excessCoresReq,
+				deviceName:    sd.resourceName,
+				deviceRequest: "1",
+				deviceLimit:   "1",
+			},
+		}
+		runTopologyManagerNegativeTest(f, 1, ctnAttrs, envInfo)
 	}
-	runTopologyManagerNegativeTest(f, 1, ctnAttrs, envInfo)
-
 	teardownSRIOVConfigOrFail(f, sd)
 }
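
To make the reject case concrete: the request is built to exceed a single NUMA node's CPU capacity by exactly one CPU. A worked example with hypothetical hardware numbers (not taken from the test environment) follows.

package main

import "fmt"

func main() {
	// Hypothetical hardware: 24 physical cores per NUMA node, hyperthreading enabled.
	threadsPerCore, coreCount := 2, 24
	numCores := 1 + (threadsPerCore * coreCount)        // 49: one CPU more than a NUMA node offers
	excessCoresReq := fmt.Sprintf("%dm", numCores*1000) // "49000m", i.e. 49 full CPUs
	fmt.Println(numCores, excessCoresReq)               // 49 49000m
	// A guaranteed pod requesting this much CPU plus one SR-IOV device cannot be
	// satisfied from one NUMA node, so the single-numa-node policy rejects it at admission.
}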

@@ -927,15 +934,18 @@ func runTopologyManagerTests(f *framework.Framework) {
 	oldCfg, err = getCurrentKubeletConfig()
 	framework.ExpectNoError(err)

-	policy := topologymanager.PolicySingleNumaNode
+	var policies = []string{topologymanager.PolicySingleNumaNode, topologymanager.PolicyRestricted,
+		topologymanager.PolicyBestEffort, topologymanager.PolicyNone}

-	// Configure Topology Manager
-	ginkgo.By(fmt.Sprintf("by configuring Topology Manager policy to %s", policy))
-	framework.Logf("Configuring topology Manager policy to %s", policy)
+	for _, policy := range policies {
+		// Configure Topology Manager
+		ginkgo.By(fmt.Sprintf("by configuring Topology Manager policy to %s", policy))
+		framework.Logf("Configuring topology Manager policy to %s", policy)

-	reservedSystemCPUs := configureTopologyManagerInKubelet(f, oldCfg, policy, configMap, numaNodes)
+		reservedSystemCPUs := configureTopologyManagerInKubelet(f, oldCfg, policy, configMap, numaNodes)

-	runTopologyManagerNodeAlignmentSuiteTests(f, configMap, reservedSystemCPUs, numaNodes, coreCount)
+		runTopologyManagerNodeAlignmentSuiteTests(f, configMap, reservedSystemCPUs, numaNodes, coreCount, policy)
+	}

 	// restore kubelet config
 	setOldKubeletConfig(f, oldCfg)
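
Each loop iteration reconfigures and restarts the kubelet via configureTopologyManagerInKubelet before running the alignment suite. That helper's body is not part of this diff; the sketch below is a hypothetical illustration of the kind of KubeletConfiguration changes such a step involves (the field names exist in the kubelet config API, but the helper name here, the concrete values, and the feature-gate handling are assumptions, not the test's code).

// Hypothetical sketch, not the test's actual configureTopologyManagerInKubelet.
package main

import (
	kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
)

func applyTopologyManagerPolicy(oldCfg *kubeletconfig.KubeletConfiguration, policy string) *kubeletconfig.KubeletConfiguration {
	newCfg := oldCfg.DeepCopy()
	if newCfg.FeatureGates == nil {
		newCfg.FeatureGates = map[string]bool{}
	}
	newCfg.FeatureGates["TopologyManager"] = true // the policy is only honored behind the feature gate in this era
	newCfg.TopologyManagerPolicy = policy         // "none", "best-effort", "restricted" or "single-numa-node"
	newCfg.CPUManagerPolicy = "static"            // exclusive CPU assignment is needed for alignment to be observable
	newCfg.ReservedSystemCPUs = "0"               // keep CPU 0 out of the exclusive pool for system daemons
	return newCfg
}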
