
Commit 1ede096

Deepthi Dharwar authored and committed
Enable topology-manager-e2e tests to run on MultiNUMA nodes.
Signed-off-by: Deepthi Dharwar <[email protected]>
1 parent 4abbce4 commit 1ede096

File tree: 1 file changed, +6 −210 lines


test/e2e_node/topology_manager_test.go

Lines changed: 6 additions & 210 deletions
@@ -34,7 +34,6 @@ import (
 	runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2"
 	kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
 	"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager"
-	"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
 	"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
 	"k8s.io/kubernetes/pkg/kubelet/types"
 	"k8s.io/kubernetes/test/e2e/framework"
@@ -94,12 +93,6 @@ func detectSRIOVDevices() int {
 	return devCount
 }

-// makeTopologyMangerPod returns a pod with the provided tmCtnAttributes.
-func makeTopologyManagerPod(podName string, tmCtnAttributes []tmCtnAttribute) *v1.Pod {
-	cpusetCmd := "grep Cpus_allowed_list /proc/self/status | cut -f2 && sleep 1d"
-	return makeTopologyManagerTestPod(podName, cpusetCmd, tmCtnAttributes)
-}
-
 func makeTopologyManagerTestPod(podName, podCmd string, tmCtnAttributes []tmCtnAttribute) *v1.Pod {
 	var containers []v1.Container
 	for _, ctnAttr := range tmCtnAttributes {
@@ -315,228 +308,31 @@ func validatePodAlignment(f *framework.Framework, pod *v1.Pod, envInfo *testEnvI

 func runTopologyManagerPolicySuiteTests(f *framework.Framework) {
 	var cpuCap, cpuAlloc int64
-	var cpuListString, expAllowedCPUsListRegex string
-	var cpuList []int
-	var cpu1, cpu2 int
-	var cset cpuset.CPUSet
-	var err error
-	var ctnAttrs []tmCtnAttribute
-	var pod, pod1, pod2 *v1.Pod

 	cpuCap, cpuAlloc, _ = getLocalNodeCPUDetails(f)

 	ginkgo.By("running a non-Gu pod")
-	ctnAttrs = []tmCtnAttribute{
-		{
-			ctnName:    "non-gu-container",
-			cpuRequest: "100m",
-			cpuLimit:   "200m",
-		},
-	}
-	pod = makeTopologyManagerPod("non-gu-pod", ctnAttrs)
-	pod = f.PodClient().CreateSync(pod)
-
-	ginkgo.By("checking if the expected cpuset was assigned")
-	expAllowedCPUsListRegex = fmt.Sprintf("^0-%d\n$", cpuCap-1)
-	err = f.PodClient().MatchContainerOutput(pod.Name, pod.Spec.Containers[0].Name, expAllowedCPUsListRegex)
-	framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]",
-		pod.Spec.Containers[0].Name, pod.Name)
-
-	ginkgo.By("by deleting the pods and waiting for container removal")
-	deletePods(f, []string{pod.Name})
-	waitForContainerRemoval(pod.Spec.Containers[0].Name, pod.Name, pod.Namespace)
+	runNonGuPodTest(f, cpuCap)

 	ginkgo.By("running a Gu pod")
-	ctnAttrs = []tmCtnAttribute{
-		{
-			ctnName:    "gu-container",
-			cpuRequest: "1000m",
-			cpuLimit:   "1000m",
-		},
-	}
-	pod = makeTopologyManagerPod("gu-pod", ctnAttrs)
-	pod = f.PodClient().CreateSync(pod)
-
-	ginkgo.By("checking if the expected cpuset was assigned")
-	cpu1 = 1
-	if isHTEnabled() {
-		cpuList = cpuset.MustParse(getCPUSiblingList(0)).ToSlice()
-		cpu1 = cpuList[1]
-	}
-	expAllowedCPUsListRegex = fmt.Sprintf("^%d\n$", cpu1)
-	err = f.PodClient().MatchContainerOutput(pod.Name, pod.Spec.Containers[0].Name, expAllowedCPUsListRegex)
-	framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]",
-		pod.Spec.Containers[0].Name, pod.Name)
-
-	ginkgo.By("by deleting the pods and waiting for container removal")
-	deletePods(f, []string{pod.Name})
-	waitForContainerRemoval(pod.Spec.Containers[0].Name, pod.Name, pod.Namespace)
+	runGuPodTest(f)

 	ginkgo.By("running multiple Gu and non-Gu pods")
-	ctnAttrs = []tmCtnAttribute{
-		{
-			ctnName:    "gu-container",
-			cpuRequest: "1000m",
-			cpuLimit:   "1000m",
-		},
-	}
-	pod1 = makeTopologyManagerPod("gu-pod", ctnAttrs)
-	pod1 = f.PodClient().CreateSync(pod1)
-
-	ctnAttrs = []tmCtnAttribute{
-		{
-			ctnName:    "non-gu-container",
-			cpuRequest: "200m",
-			cpuLimit:   "300m",
-		},
-	}
-	pod2 = makeTopologyManagerPod("non-gu-pod", ctnAttrs)
-	pod2 = f.PodClient().CreateSync(pod2)
-
-	ginkgo.By("checking if the expected cpuset was assigned")
-	cpu1 = 1
-	if isHTEnabled() {
-		cpuList = cpuset.MustParse(getCPUSiblingList(0)).ToSlice()
-		cpu1 = cpuList[1]
-	}
-	expAllowedCPUsListRegex = fmt.Sprintf("^%d\n$", cpu1)
-	err = f.PodClient().MatchContainerOutput(pod1.Name, pod1.Spec.Containers[0].Name, expAllowedCPUsListRegex)
-	framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]",
-		pod1.Spec.Containers[0].Name, pod1.Name)
-
-	cpuListString = "0"
-	if cpuAlloc > 2 {
-		cset = cpuset.MustParse(fmt.Sprintf("0-%d", cpuCap-1))
-		cpuListString = fmt.Sprintf("%s", cset.Difference(cpuset.NewCPUSet(cpu1)))
-	}
-	expAllowedCPUsListRegex = fmt.Sprintf("^%s\n$", cpuListString)
-	err = f.PodClient().MatchContainerOutput(pod2.Name, pod2.Spec.Containers[0].Name, expAllowedCPUsListRegex)
-	framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]",
-		pod2.Spec.Containers[0].Name, pod2.Name)
-
-	ginkgo.By("by deleting the pods and waiting for container removal")
-	deletePods(f, []string{pod1.Name, pod2.Name})
-	waitForContainerRemoval(pod1.Spec.Containers[0].Name, pod1.Name, pod1.Namespace)
-	waitForContainerRemoval(pod2.Spec.Containers[0].Name, pod2.Name, pod2.Namespace)
+	runMultipleGuNonGuPods(f, cpuCap, cpuAlloc)

 	// Skip rest of the tests if CPU capacity < 3.
 	if cpuCap < 3 {
 		e2eskipper.Skipf("Skipping rest of the CPU Manager tests since CPU capacity < 3")
 	}

 	ginkgo.By("running a Gu pod requesting multiple CPUs")
-	ctnAttrs = []tmCtnAttribute{
-		{
-			ctnName:    "gu-container",
-			cpuRequest: "2000m",
-			cpuLimit:   "2000m",
-		},
-	}
-	pod = makeTopologyManagerPod("gu-pod", ctnAttrs)
-	pod = f.PodClient().CreateSync(pod)
-
-	ginkgo.By("checking if the expected cpuset was assigned")
-	cpuListString = "1-2"
-	if isHTEnabled() {
-		cpuListString = "2-3"
-		cpuList = cpuset.MustParse(getCPUSiblingList(0)).ToSlice()
-		if cpuList[1] != 1 {
-			cset = cpuset.MustParse(getCPUSiblingList(1))
-			cpuListString = fmt.Sprintf("%s", cset)
-		}
-	}
-	expAllowedCPUsListRegex = fmt.Sprintf("^%s\n$", cpuListString)
-	err = f.PodClient().MatchContainerOutput(pod.Name, pod.Spec.Containers[0].Name, expAllowedCPUsListRegex)
-	framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]",
-		pod.Spec.Containers[0].Name, pod.Name)
-
-	ginkgo.By("by deleting the pods and waiting for container removal")
-	deletePods(f, []string{pod.Name})
-	waitForContainerRemoval(pod.Spec.Containers[0].Name, pod.Name, pod.Namespace)
+	runMultipleCPUGuPod(f)

 	ginkgo.By("running a Gu pod with multiple containers requesting integer CPUs")
-	ctnAttrs = []tmCtnAttribute{
-		{
-			ctnName:    "gu-container1",
-			cpuRequest: "1000m",
-			cpuLimit:   "1000m",
-		},
-		{
-			ctnName:    "gu-container2",
-			cpuRequest: "1000m",
-			cpuLimit:   "1000m",
-		},
-	}
-	pod = makeTopologyManagerPod("gu-pod", ctnAttrs)
-	pod = f.PodClient().CreateSync(pod)
-
-	ginkgo.By("checking if the expected cpuset was assigned")
-	cpu1, cpu2 = 1, 2
-	if isHTEnabled() {
-		cpuList = cpuset.MustParse(getCPUSiblingList(0)).ToSlice()
-		if cpuList[1] != 1 {
-			cpu1, cpu2 = cpuList[1], 1
-		}
-	}
-
-	expAllowedCPUsListRegex = fmt.Sprintf("^%d|%d\n$", cpu1, cpu2)
-	err = f.PodClient().MatchContainerOutput(pod.Name, pod.Spec.Containers[0].Name, expAllowedCPUsListRegex)
-	framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]",
-		pod.Spec.Containers[0].Name, pod.Name)
-
-	err = f.PodClient().MatchContainerOutput(pod.Name, pod.Spec.Containers[0].Name, expAllowedCPUsListRegex)
-	framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]",
-		pod.Spec.Containers[1].Name, pod.Name)
-
-	ginkgo.By("by deleting the pods and waiting for container removal")
-	deletePods(f, []string{pod.Name})
-	waitForContainerRemoval(pod.Spec.Containers[0].Name, pod.Name, pod.Namespace)
-	waitForContainerRemoval(pod.Spec.Containers[1].Name, pod.Name, pod.Namespace)
+	runMultipleCPUContainersGuPod(f)

 	ginkgo.By("running multiple Gu pods")
-	ctnAttrs = []tmCtnAttribute{
-		{
-			ctnName:    "gu-container1",
-			cpuRequest: "1000m",
-			cpuLimit:   "1000m",
-		},
-	}
-	pod1 = makeTopologyManagerPod("gu-pod1", ctnAttrs)
-	pod1 = f.PodClient().CreateSync(pod1)
-
-	ctnAttrs = []tmCtnAttribute{
-		{
-			ctnName:    "gu-container2",
-			cpuRequest: "1000m",
-			cpuLimit:   "1000m",
-		},
-	}
-	pod2 = makeTopologyManagerPod("gu-pod2", ctnAttrs)
-	pod2 = f.PodClient().CreateSync(pod2)
-
-	ginkgo.By("checking if the expected cpuset was assigned")
-	cpu1, cpu2 = 1, 2
-	if isHTEnabled() {
-		cpuList = cpuset.MustParse(getCPUSiblingList(0)).ToSlice()
-		if cpuList[1] != 1 {
-			cpu1, cpu2 = cpuList[1], 1
-		}
-	}
-
-	expAllowedCPUsListRegex = fmt.Sprintf("^%d\n$", cpu1)
-	err = f.PodClient().MatchContainerOutput(pod1.Name, pod1.Spec.Containers[0].Name, expAllowedCPUsListRegex)
-	framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]",
-		pod1.Spec.Containers[0].Name, pod1.Name)
-
-	expAllowedCPUsListRegex = fmt.Sprintf("^%d\n$", cpu2)
-	err = f.PodClient().MatchContainerOutput(pod2.Name, pod2.Spec.Containers[0].Name, expAllowedCPUsListRegex)
-	framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]",
-		pod2.Spec.Containers[0].Name, pod2.Name)
-
-	ginkgo.By("by deleting the pods and waiting for container removal")
-	deletePods(f, []string{pod1.Name, pod2.Name})
-	waitForContainerRemoval(pod1.Spec.Containers[0].Name, pod1.Name, pod1.Namespace)
-	waitForContainerRemoval(pod2.Spec.Containers[0].Name, pod2.Name, pod2.Namespace)
+	runMultipleGuPods(f)
 }

 func waitForAllContainerRemoval(podName, podNS string) {
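The inline scenarios removed above are replaced by calls to shared helpers (runNonGuPodTest, runGuPodTest, runMultipleGuNonGuPods, runMultipleCPUGuPod, runMultipleCPUContainersGuPod, runMultipleGuPods) that are defined outside this diff, presumably alongside the CPU manager e2e test code. As a hypothetical sketch only, reconstructed from the removed "running a Gu pod" block, such a helper would look roughly like this; it assumes the surrounding test file's utilities (tmCtnAttribute, isHTEnabled, getCPUSiblingList, deletePods, waitForContainerRemoval) and may differ from the real helper, which cannot use the deleted makeTopologyManagerPod wrapper:

```go
// Hypothetical sketch of a shared helper such as runGuPodTest, assembled from
// the inline block this commit removes; the actual helper lives outside this
// diff and may construct the pod via other test utilities.
func runGuPodTest(f *framework.Framework) {
	ctnAttrs := []tmCtnAttribute{
		{
			ctnName:    "gu-container",
			cpuRequest: "1000m",
			cpuLimit:   "1000m",
		},
	}
	// Create a Guaranteed pod whose container prints its allowed cpuset.
	pod := makeTopologyManagerTestPod("gu-pod",
		"grep Cpus_allowed_list /proc/self/status | cut -f2 && sleep 1d", ctnAttrs)
	pod = f.PodClient().CreateSync(pod)

	ginkgo.By("checking if the expected cpuset was assigned")
	// With hyper-threading enabled, the sibling of CPU 0 is expected instead of CPU 1.
	cpu1 := 1
	if isHTEnabled() {
		cpuList := cpuset.MustParse(getCPUSiblingList(0)).ToSlice()
		cpu1 = cpuList[1]
	}
	expAllowedCPUsListRegex := fmt.Sprintf("^%d\n$", cpu1)
	err := f.PodClient().MatchContainerOutput(pod.Name, pod.Spec.Containers[0].Name, expAllowedCPUsListRegex)
	framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]",
		pod.Spec.Containers[0].Name, pod.Name)

	ginkgo.By("by deleting the pods and waiting for container removal")
	deletePods(f, []string{pod.Name})
	waitForContainerRemoval(pod.Spec.Containers[0].Name, pod.Name, pod.Namespace)
}
```

The other helpers would presumably wrap the corresponding removed blocks in the same way, letting the topology manager suite reuse the scenarios on multi-NUMA nodes without duplicating the test logic.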
