
Commit 5625483 (parent 269bbac)

KEP-4540: added e2e tests for strict-cpu-reservation option
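The change extends the node e2e suites for KEP-4540 (strict CPU reservation). With the static CPU manager policy, the CPUs listed in reservedSystemCPUs are normally excluded only from exclusive assignment to Guaranteed (Gu) pods; the shared pool that non-Guaranteed (non-Gu) pods run on still contains them. With the strict-cpu-reservation policy option enabled, the reserved CPUs are removed from the shared pool as well, so neither Gu nor non-Gu pods may land on them. To verify both behaviours, the helpers runGuPodTest, runNonGuPodTest and runSMTAlignmentPositiveTests gain a strictReservedCPUs argument naming the CPUs that must never appear in a container's allowed cpuset; existing callers pass cpuset.New() (the empty set) to keep their previous expectations.

A minimal sketch of the cpuset arithmetic the updated helpers rely on, using k8s.io/utils/cpuset; the node size and variable names are illustrative, not taken from the commit:

package main

import (
	"fmt"

	"k8s.io/utils/cpuset"
)

func main() {
	// Assume an 8-CPU node (CPUs 0-7) with reservedSystemCPUs = {0}.
	allCPUs, err := cpuset.Parse("0-7")
	if err != nil {
		panic(err)
	}
	reserved := cpuset.New(0)

	// Without strict-cpu-reservation a non-Gu pod may use every online CPU,
	// so runNonGuPodTest is called with an empty strictReservedCPUs set.
	fmt.Println(allCPUs.Difference(cpuset.New()).String()) // "0-7"

	// With strict-cpu-reservation the reserved CPUs are carved out of the
	// shared pool; this mirrors the expAllowedCPUs computation in the test.
	expAllowed := allCPUs.Difference(reserved)
	fmt.Println(expAllowed.String()) // "1-7"

	// The new assertion in runGuPodTest reduces to this kind of check.
	fmt.Println(expAllowed.Intersection(reserved).IsEmpty()) // true
}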

2 files changed: +102, -13 lines

test/e2e_node/cpu_manager_test.go

Lines changed: 99 additions & 11 deletions
@@ -327,7 +327,7 @@ func configureCPUManagerInKubelet(oldCfg *kubeletconfig.KubeletConfiguration, ku
 	return newCfg
 }
 
-func runGuPodTest(ctx context.Context, f *framework.Framework, cpuCount int) {
+func runGuPodTest(ctx context.Context, f *framework.Framework, cpuCount int, strictReservedCPUs cpuset.CPUSet) {
 	var pod *v1.Pod
 
 	ctnAttrs := []ctnAttribute{
@@ -353,14 +353,15 @@ func runGuPodTest(ctx context.Context, f *framework.Framework, cpuCount int) {
 		framework.ExpectNoError(err, "parsing cpuset from logs for [%s] of pod [%s]", cnt.Name, pod.Name)
 
 		gomega.Expect(cpus.Size()).To(gomega.Equal(cpuCount), "expected cpu set size == %d, got %q", cpuCount, cpus.String())
+		gomega.Expect(cpus.Intersection(strictReservedCPUs).IsEmpty()).To(gomega.BeTrueBecause("cpuset %q should not contain strict reserved cpus %q", cpus.String(), strictReservedCPUs.String()))
 	}
 
 	ginkgo.By("by deleting the pods and waiting for container removal")
 	deletePods(ctx, f, []string{pod.Name})
 	waitForAllContainerRemoval(ctx, pod.Name, pod.Namespace)
 }
 
-func runNonGuPodTest(ctx context.Context, f *framework.Framework, cpuCap int64) {
+func runNonGuPodTest(ctx context.Context, f *framework.Framework, cpuCap int64, strictReservedCPUs cpuset.CPUSet) {
 	var ctnAttrs []ctnAttribute
 	var err error
 	var pod *v1.Pod
@@ -377,11 +378,10 @@ func runNonGuPodTest(ctx context.Context, f *framework.Framework, cpuCap int64)
 	pod = e2epod.NewPodClient(f).CreateSync(ctx, pod)
 
 	ginkgo.By("checking if the expected cpuset was assigned")
-	expAllowedCPUsListRegex = fmt.Sprintf("^0-%d\n$", cpuCap-1)
-	// on the single CPU node the only possible value is 0
-	if cpuCap == 1 {
-		expAllowedCPUsListRegex = "^0\n$"
-	}
+	expAllowedCPUs, err := cpuset.Parse(fmt.Sprintf("0-%d", cpuCap-1))
+	framework.ExpectNoError(err)
+	expAllowedCPUs = expAllowedCPUs.Difference(strictReservedCPUs)
+	expAllowedCPUsListRegex = fmt.Sprintf("^%s\n$", expAllowedCPUs.String())
 	err = e2epod.NewPodClient(f).MatchContainerOutput(ctx, pod.Name, pod.Spec.Containers[0].Name, expAllowedCPUsListRegex)
 	framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]",
 		pod.Spec.Containers[0].Name, pod.Name)
@@ -890,10 +890,10 @@ func runCPUManagerTests(f *framework.Framework) {
 		updateKubeletConfig(ctx, f, newCfg, true)
 
 		ginkgo.By("running a non-Gu pod")
-		runNonGuPodTest(ctx, f, cpuCap)
+		runNonGuPodTest(ctx, f, cpuCap, cpuset.New())
 
 		ginkgo.By("running a Gu pod")
-		runGuPodTest(ctx, f, 1)
+		runGuPodTest(ctx, f, 1, cpuset.New())
 
 		ginkgo.By("running multiple Gu and non-Gu pods")
 		runMultipleGuNonGuPods(ctx, f, cpuCap, cpuAlloc)
@@ -916,6 +916,56 @@ func runCPUManagerTests(f *framework.Framework) {
 		runAutomaticallyRemoveInactivePodsFromCPUManagerStateFile(ctx, f)
 	})
 
+	ginkgo.It("reservedSystemCPUs are excluded only for Gu pods (strict-cpu-reservation option not enabled by default)", func(ctx context.Context) {
+		cpuCap, cpuAlloc, _ = getLocalNodeCPUDetails(ctx, f)
+
+		// Skip CPU Manager tests altogether if the CPU capacity < 2.
+		if cpuCap < 2 {
+			e2eskipper.Skipf("Skipping CPU Manager tests since the CPU capacity < 2")
+		}
+
+		reservedSystemCPUs := cpuset.New(0)
+		newCfg := configureCPUManagerInKubelet(oldCfg, &cpuManagerKubeletArguments{
+			policyName:         string(cpumanager.PolicyStatic),
+			reservedSystemCPUs: reservedSystemCPUs,
+		})
+		updateKubeletConfig(ctx, f, newCfg, true)
+
+		ginkgo.By("running a Gu pod - it shouldn't use reserved system CPUs")
+		runGuPodTest(ctx, f, 1, reservedSystemCPUs)
+
+		ginkgo.By("running a non-Gu pod - it can use reserved system CPUs")
+		runNonGuPodTest(ctx, f, cpuCap, cpuset.New())
+
+	})
+
+	ginkgo.It("reservedSystemCPUs are excluded for both Gu and non-Gu pods (strict-cpu-reservation option enabled)", func(ctx context.Context) {
+		cpuCap, cpuAlloc, _ = getLocalNodeCPUDetails(ctx, f)
+
+		// Skip CPU Manager tests altogether if the CPU capacity < 2.
+		if cpuCap < 2 {
+			e2eskipper.Skipf("Skipping CPU Manager tests since the CPU capacity < 2")
+		}
+
+		reservedSystemCPUs := cpuset.New(0)
+		cpuPolicyOptions := map[string]string{
+			cpumanager.StrictCPUReservationOption: "true",
+		}
+		newCfg := configureCPUManagerInKubelet(oldCfg, &cpuManagerKubeletArguments{
+			policyName:              string(cpumanager.PolicyStatic),
+			reservedSystemCPUs:      reservedSystemCPUs,
+			enableCPUManagerOptions: true,
+			options:                 cpuPolicyOptions,
+		})
+		updateKubeletConfig(ctx, f, newCfg, true)
+
+		ginkgo.By("running a Gu pod - it shouldn't use reserved system CPUs")
+		runGuPodTest(ctx, f, 1, reservedSystemCPUs)
+
+		ginkgo.By("running a non-Gu pod - it shouldn't use reserved system CPUs with strict-cpu-reservation option enabled")
+		runNonGuPodTest(ctx, f, cpuCap, reservedSystemCPUs)
+	})
+
 	ginkgo.It("should assign CPUs as expected with enhanced policy based on strict SMT alignment", func(ctx context.Context) {
 		fullCPUsOnlyOpt := fmt.Sprintf("option=%s", cpumanager.FullPCPUsOnlyOption)
 		_, cpuAlloc, _ = getLocalNodeCPUDetails(ctx, f)
@@ -950,7 +1000,44 @@ func runCPUManagerTests(f *framework.Framework) {
 
 		// the order between negative and positive doesn't really matter
 		runSMTAlignmentNegativeTests(ctx, f)
-		runSMTAlignmentPositiveTests(ctx, f, smtLevel)
+		runSMTAlignmentPositiveTests(ctx, f, smtLevel, cpuset.New())
+	})
+
+	ginkgo.It("should assign CPUs as expected based on strict SMT alignment, reservedSystemCPUs should be excluded (both strict-cpu-reservation and full-pcpus-only options enabled)", func(ctx context.Context) {
+		fullCPUsOnlyOpt := fmt.Sprintf("option=%s", cpumanager.FullPCPUsOnlyOption)
+		_, cpuAlloc, _ = getLocalNodeCPUDetails(ctx, f)
+		smtLevel := getSMTLevel()
+
+		// strict SMT alignment is trivially verified and granted on non-SMT systems
+		if smtLevel < 2 {
+			e2eskipper.Skipf("Skipping CPU Manager %s tests since SMT disabled", fullCPUsOnlyOpt)
+		}
+
+		// our tests want to allocate a full core, so we need at last smtLevel*2 virtual cpus
+		if cpuAlloc < int64(smtLevel*2) {
+			e2eskipper.Skipf("Skipping CPU Manager %s tests since the CPU capacity < %d", fullCPUsOnlyOpt, smtLevel*2)
+		}
+
+		framework.Logf("SMT level %d", smtLevel)
+
+		reservedSystemCPUs := cpuset.New(0)
+		cpuPolicyOptions := map[string]string{
+			cpumanager.FullPCPUsOnlyOption:        "true",
+			cpumanager.StrictCPUReservationOption: "true",
+		}
+		newCfg := configureCPUManagerInKubelet(oldCfg,
+			&cpuManagerKubeletArguments{
+				policyName:              string(cpumanager.PolicyStatic),
+				reservedSystemCPUs:      reservedSystemCPUs,
+				enableCPUManagerOptions: true,
+				options:                 cpuPolicyOptions,
+			},
+		)
+		updateKubeletConfig(ctx, f, newCfg, true)
+
+		// the order between negative and positive doesn't really matter
+		runSMTAlignmentNegativeTests(ctx, f)
+		runSMTAlignmentPositiveTests(ctx, f, smtLevel, reservedSystemCPUs)
 	})
 
 	ginkgo.It("should not enforce CFS quota for containers with static CPUs assigned", func(ctx context.Context) {
@@ -1108,7 +1195,7 @@ func runSMTAlignmentNegativeTests(ctx context.Context, f *framework.Framework) {
 	waitForAllContainerRemoval(ctx, pod.Name, pod.Namespace)
 }
 
-func runSMTAlignmentPositiveTests(ctx context.Context, f *framework.Framework, smtLevel int) {
+func runSMTAlignmentPositiveTests(ctx context.Context, f *framework.Framework, smtLevel int, strictReservedCPUs cpuset.CPUSet) {
 	// positive test: try to run a container whose requests are a multiple of SMT level, check allocated cores
 	// 1. are core siblings
 	// 2. take a full core
@@ -1134,6 +1221,7 @@ func runSMTAlignmentPositiveTests(ctx context.Context, f *framework.Framework, s
 		cpus, err := cpuset.Parse(strings.TrimSpace(logs))
 		framework.ExpectNoError(err, "parsing cpuset from logs for [%s] of pod [%s]", cnt.Name, pod.Name)
 
+		gomega.Expect(cpus.Intersection(strictReservedCPUs).IsEmpty()).To(gomega.BeTrueBecause("cpuset %q should not contain strict reserved cpus %q", cpus.String(), strictReservedCPUs.String()))
 		validateSMTAlignment(cpus, smtLevel, pod, &cnt)
 	}
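For context, here is a rough sketch of the kubelet configuration the new strict-reservation test cases end up applying through configureCPUManagerInKubelet and updateKubeletConfig. It is an illustration only, written against the versioned v1beta1 kubelet config API rather than the internal type the test uses; the option keys are the string values behind cpumanager.StrictCPUReservationOption and cpumanager.FullPCPUsOnlyOption, and the feature gates are the assumed equivalent of what enableCPUManagerOptions toggles so that non-GA policy options are accepted.

package main

import (
	"fmt"

	kubeletconfigv1beta1 "k8s.io/kubelet/config/v1beta1"
)

func main() {
	cfg := &kubeletconfigv1beta1.KubeletConfiguration{
		CPUManagerPolicy:   "static",
		ReservedSystemCPUs: "0", // cpuset.New(0) in the tests
		CPUManagerPolicyOptions: map[string]string{
			"strict-cpu-reservation": "true",
			"full-pcpus-only":        "true", // only set in the SMT-alignment case
		},
		FeatureGates: map[string]bool{
			// Assumed counterpart of enableCPUManagerOptions in the test helper.
			"CPUManagerPolicyAlphaOptions": true,
			"CPUManagerPolicyBetaOptions":  true,
		},
	}
	fmt.Printf("%+v\n", cfg)
}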

test/e2e_node/topology_manager_test.go

Lines changed: 3 additions & 2 deletions
@@ -19,6 +19,7 @@ package e2enode
 import (
 	"context"
 	"fmt"
+	"k8s.io/utils/cpuset"
 	"os"
 	"os/exec"
 	"regexp"
@@ -446,10 +447,10 @@ func runTopologyManagerPolicySuiteTests(ctx context.Context, f *framework.Framew
 	}
 
 	ginkgo.By("running a non-Gu pod")
-	runNonGuPodTest(ctx, f, cpuCap)
+	runNonGuPodTest(ctx, f, cpuCap, cpuset.New())
 
 	ginkgo.By("running a Gu pod")
-	runGuPodTest(ctx, f, 1)
+	runGuPodTest(ctx, f, 1, cpuset.New())
 
 	// Skip rest of the tests if CPU allocatable < 3.
 	if cpuAlloc < 3 {
