@@ -327,7 +327,7 @@ func configureCPUManagerInKubelet(oldCfg *kubeletconfig.KubeletConfiguration, ku
 	return newCfg
 }

-func runGuPodTest(ctx context.Context, f *framework.Framework, cpuCount int) {
+func runGuPodTest(ctx context.Context, f *framework.Framework, cpuCount int, strictReservedCPUs cpuset.CPUSet) {
 	var pod *v1.Pod

 	ctnAttrs := []ctnAttribute{
@@ -353,14 +353,15 @@ func runGuPodTest(ctx context.Context, f *framework.Framework, cpuCount int) {
 		framework.ExpectNoError(err, "parsing cpuset from logs for [%s] of pod [%s]", cnt.Name, pod.Name)

 		gomega.Expect(cpus.Size()).To(gomega.Equal(cpuCount), "expected cpu set size == %d, got %q", cpuCount, cpus.String())
+		gomega.Expect(cpus.Intersection(strictReservedCPUs).IsEmpty()).To(gomega.BeTrueBecause("cpuset %q should not contain strict reserved cpus %q", cpus.String(), strictReservedCPUs.String()))
 	}

 	ginkgo.By("by deleting the pods and waiting for container removal")
 	deletePods(ctx, f, []string{pod.Name})
 	waitForAllContainerRemoval(ctx, pod.Name, pod.Namespace)
 }

-func runNonGuPodTest(ctx context.Context, f *framework.Framework, cpuCap int64) {
+func runNonGuPodTest(ctx context.Context, f *framework.Framework, cpuCap int64, strictReservedCPUs cpuset.CPUSet) {
 	var ctnAttrs []ctnAttribute
 	var err error
 	var pod *v1.Pod
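For reference, the new Gu-pod assertion reduces to an Intersection/IsEmpty check on k8s.io/utils/cpuset values. A minimal standalone sketch with illustrative values (CPU 0 reserved, container pinned to CPUs 2-3; not part of the e2e harness):

package main

import (
	"fmt"

	"k8s.io/utils/cpuset"
)

func main() {
	assigned := cpuset.New(2, 3) // cpuset read back from the container's cgroup (illustrative)
	reserved := cpuset.New(0)    // strictReservedCPUs passed to the test helper (illustrative)

	// The Gu-pod assertion: the assigned set must not overlap the reserved set.
	overlap := assigned.Intersection(reserved)
	fmt.Printf("assigned %q disjoint from reserved %q: %v\n",
		assigned.String(), reserved.String(), overlap.IsEmpty()) // true
}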
@@ -377,11 +378,10 @@ func runNonGuPodTest(ctx context.Context, f *framework.Framework, cpuCap int64)
 	pod = e2epod.NewPodClient(f).CreateSync(ctx, pod)

 	ginkgo.By("checking if the expected cpuset was assigned")
-	expAllowedCPUsListRegex = fmt.Sprintf("^0-%d\n$", cpuCap-1)
-	// on the single CPU node the only possible value is 0
-	if cpuCap == 1 {
-		expAllowedCPUsListRegex = "^0\n$"
-	}
+	expAllowedCPUs, err := cpuset.Parse(fmt.Sprintf("0-%d", cpuCap-1))
+	framework.ExpectNoError(err)
+	expAllowedCPUs = expAllowedCPUs.Difference(strictReservedCPUs)
+	expAllowedCPUsListRegex = fmt.Sprintf("^%s\n$", expAllowedCPUs.String())
 	err = e2epod.NewPodClient(f).MatchContainerOutput(ctx, pod.Name, pod.Spec.Containers[0].Name, expAllowedCPUsListRegex)
 	framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]",
 		pod.Spec.Containers[0].Name, pod.Name)
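Note that the rewrite above also retires the old single-CPU special case: cpuset.Parse accepts the degenerate range "0-0", so one code path covers every node size. A standalone sketch of the same derivation, assuming an 8-CPU node with CPU 0 strictly reserved (illustrative values):

package main

import (
	"fmt"

	"k8s.io/utils/cpuset"
)

func main() {
	cpuCap := int64(8)        // node CPU capacity (illustrative)
	reserved := cpuset.New(0) // strictly reserved CPUs (illustrative)

	// All online CPUs: "0-7". Parse also handles "0-0" for a single-CPU node.
	all, err := cpuset.Parse(fmt.Sprintf("0-%d", cpuCap-1))
	if err != nil {
		panic(err)
	}

	// Non-Gu pods may run anywhere except the strictly reserved CPUs.
	expAllowed := all.Difference(reserved)
	fmt.Println(expAllowed.String()) // "1-7", the value matched against Cpus_allowed_list
}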
@@ -890,10 +890,10 @@ func runCPUManagerTests(f *framework.Framework) {
 		updateKubeletConfig(ctx, f, newCfg, true)

 		ginkgo.By("running a non-Gu pod")
-		runNonGuPodTest(ctx, f, cpuCap)
+		runNonGuPodTest(ctx, f, cpuCap, cpuset.New())

 		ginkgo.By("running a Gu pod")
-		runGuPodTest(ctx, f, 1)
+		runGuPodTest(ctx, f, 1, cpuset.New())

 		ginkgo.By("running multiple Gu and non-Gu pods")
 		runMultipleGuNonGuPods(ctx, f, cpuCap, cpuAlloc)
@@ -916,6 +916,56 @@ func runCPUManagerTests(f *framework.Framework) {
 		runAutomaticallyRemoveInactivePodsFromCPUManagerStateFile(ctx, f)
 	})

+	ginkgo.It("reservedSystemCPUs are excluded only for Gu pods (strict-cpu-reservation option not enabled by default)", func(ctx context.Context) {
+		cpuCap, cpuAlloc, _ = getLocalNodeCPUDetails(ctx, f)
+
+		// Skip CPU Manager tests altogether if the CPU capacity < 2.
+		if cpuCap < 2 {
+			e2eskipper.Skipf("Skipping CPU Manager tests since the CPU capacity < 2")
+		}
+
+		reservedSystemCPUs := cpuset.New(0)
+		newCfg := configureCPUManagerInKubelet(oldCfg, &cpuManagerKubeletArguments{
+			policyName:         string(cpumanager.PolicyStatic),
+			reservedSystemCPUs: reservedSystemCPUs,
+		})
+		updateKubeletConfig(ctx, f, newCfg, true)
+
+		ginkgo.By("running a Gu pod - it shouldn't use reserved system CPUs")
+		runGuPodTest(ctx, f, 1, reservedSystemCPUs)
+
+		ginkgo.By("running a non-Gu pod - it can use reserved system CPUs")
+		runNonGuPodTest(ctx, f, cpuCap, cpuset.New())
+
+	})
+
+	ginkgo.It("reservedSystemCPUs are excluded for both Gu and non-Gu pods (strict-cpu-reservation option enabled)", func(ctx context.Context) {
+		cpuCap, cpuAlloc, _ = getLocalNodeCPUDetails(ctx, f)
+
+		// Skip CPU Manager tests altogether if the CPU capacity < 2.
+		if cpuCap < 2 {
+			e2eskipper.Skipf("Skipping CPU Manager tests since the CPU capacity < 2")
+		}
+
+		reservedSystemCPUs := cpuset.New(0)
+		cpuPolicyOptions := map[string]string{
+			cpumanager.StrictCPUReservationOption: "true",
+		}
+		newCfg := configureCPUManagerInKubelet(oldCfg, &cpuManagerKubeletArguments{
+			policyName:              string(cpumanager.PolicyStatic),
+			reservedSystemCPUs:      reservedSystemCPUs,
+			enableCPUManagerOptions: true,
+			options:                 cpuPolicyOptions,
+		})
+		updateKubeletConfig(ctx, f, newCfg, true)
+
+		ginkgo.By("running a Gu pod - it shouldn't use reserved system CPUs")
+		runGuPodTest(ctx, f, 1, reservedSystemCPUs)
+
+		ginkgo.By("running a non-Gu pod - it shouldn't use reserved system CPUs with strict-cpu-reservation option enabled")
+		runNonGuPodTest(ctx, f, cpuCap, reservedSystemCPUs)
+	})
+
 	ginkgo.It("should assign CPUs as expected with enhanced policy based on strict SMT alignment", func(ctx context.Context) {
 		fullCPUsOnlyOpt := fmt.Sprintf("option=%s", cpumanager.FullPCPUsOnlyOption)
 		_, cpuAlloc, _ = getLocalNodeCPUDetails(ctx, f)
@@ -950,7 +1000,44 @@ func runCPUManagerTests(f *framework.Framework) {

 		// the order between negative and positive doesn't really matter
 		runSMTAlignmentNegativeTests(ctx, f)
-		runSMTAlignmentPositiveTests(ctx, f, smtLevel)
+		runSMTAlignmentPositiveTests(ctx, f, smtLevel, cpuset.New())
+	})
+
+	ginkgo.It("should assign CPUs as expected based on strict SMT alignment, reservedSystemCPUs should be excluded (both strict-cpu-reservation and full-pcpus-only options enabled)", func(ctx context.Context) {
+		fullCPUsOnlyOpt := fmt.Sprintf("option=%s", cpumanager.FullPCPUsOnlyOption)
+		_, cpuAlloc, _ = getLocalNodeCPUDetails(ctx, f)
+		smtLevel := getSMTLevel()
+
+		// strict SMT alignment is trivially verified and granted on non-SMT systems
+		if smtLevel < 2 {
+			e2eskipper.Skipf("Skipping CPU Manager %s tests since SMT disabled", fullCPUsOnlyOpt)
+		}
+
+		// our tests want to allocate a full core, so we need at least smtLevel*2 virtual cpus
+		if cpuAlloc < int64(smtLevel*2) {
+			e2eskipper.Skipf("Skipping CPU Manager %s tests since the CPU capacity < %d", fullCPUsOnlyOpt, smtLevel*2)
+		}
+
+		framework.Logf("SMT level %d", smtLevel)
+
+		reservedSystemCPUs := cpuset.New(0)
+		cpuPolicyOptions := map[string]string{
+			cpumanager.FullPCPUsOnlyOption:        "true",
+			cpumanager.StrictCPUReservationOption: "true",
+		}
+		newCfg := configureCPUManagerInKubelet(oldCfg,
+			&cpuManagerKubeletArguments{
+				policyName:              string(cpumanager.PolicyStatic),
+				reservedSystemCPUs:      reservedSystemCPUs,
+				enableCPUManagerOptions: true,
+				options:                 cpuPolicyOptions,
+			},
+		)
+		updateKubeletConfig(ctx, f, newCfg, true)
+
+		// the order between negative and positive doesn't really matter
+		runSMTAlignmentNegativeTests(ctx, f)
+		runSMTAlignmentPositiveTests(ctx, f, smtLevel, reservedSystemCPUs)
 	})

 	ginkgo.It("should not enforce CFS quota for containers with static CPUs assigned", func(ctx context.Context) {
@@ -1108,7 +1195,7 @@ func runSMTAlignmentNegativeTests(ctx context.Context, f *framework.Framework) {
 	waitForAllContainerRemoval(ctx, pod.Name, pod.Namespace)
 }

-func runSMTAlignmentPositiveTests(ctx context.Context, f *framework.Framework, smtLevel int) {
+func runSMTAlignmentPositiveTests(ctx context.Context, f *framework.Framework, smtLevel int, strictReservedCPUs cpuset.CPUSet) {
 	// positive test: try to run a container whose requests are a multiple of SMT level, check allocated cores
 	// 1. are core siblings
 	// 2. take a full core
@@ -1134,6 +1221,7 @@ func runSMTAlignmentPositiveTests(ctx context.Context, f *framework.Framework, s
 		cpus, err := cpuset.Parse(strings.TrimSpace(logs))
 		framework.ExpectNoError(err, "parsing cpuset from logs for [%s] of pod [%s]", cnt.Name, pod.Name)

+		gomega.Expect(cpus.Intersection(strictReservedCPUs).IsEmpty()).To(gomega.BeTrueBecause("cpuset %q should not contain strict reserved cpus %q", cpus.String(), strictReservedCPUs.String()))
 		validateSMTAlignment(cpus, smtLevel, pod, &cnt)
 	}

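Taken together, the new tests drive two kubelet knobs: a static CPU Manager policy with reservedSystemCPUs, plus the strict-cpu-reservation (and, for the SMT variant, full-pcpus-only) policy options. A hedged sketch of the resulting configuration, using field names from the kubeletconfig v1beta1 API; the e2e helper configureCPUManagerInKubelet assembles an equivalent internal object, and the feature gates required for strict-cpu-reservation depend on the Kubernetes release:

package main

import (
	"fmt"

	kubeletconfig "k8s.io/kubelet/config/v1beta1"
)

func main() {
	cfg := kubeletconfig.KubeletConfiguration{
		CPUManagerPolicy:   "static",
		ReservedSystemCPUs: "0", // the CPUs the tests expect pods to avoid
		CPUManagerPolicyOptions: map[string]string{
			// cpumanager.StrictCPUReservationOption: keep reserved CPUs away
			// from non-guaranteed pods as well as guaranteed ones.
			"strict-cpu-reservation": "true",
			// cpumanager.FullPCPUsOnlyOption: only used by the SMT-alignment variant.
			"full-pcpus-only": "true",
		},
	}
	fmt.Printf("policy=%s reserved=%s options=%v\n",
		cfg.CPUManagerPolicy, cfg.ReservedSystemCPUs, cfg.CPUManagerPolicyOptions)
}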