@@ -21,11 +21,10 @@ import (
 	"fmt"
 	"k8s.io/kubernetes/pkg/apis/core/v1/helper/qos"
 	"k8s.io/kubernetes/pkg/kubelet/apis/config"
-	"k8s.io/kubernetes/pkg/kubelet/cm"
 	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
 	"k8s.io/kubernetes/test/e2e/nodefeature"
+	imageutils "k8s.io/kubernetes/test/utils/image"
 	"math/big"
-	"os/exec"
 	"path/filepath"
 	"strconv"
 	"strings"
@@ -53,8 +52,7 @@ const (
 )

 var (
-	noRequests *resource.Quantity = nil
-	noLimits   *resource.Quantity = nil
+	noLimits *resource.Quantity = nil
 )

 var _ = SIGDescribe("Swap", "[LinuxOnly]", nodefeature.Swap, func() {
@@ -77,14 +75,14 @@ var _ = SIGDescribe("Swap", "[LinuxOnly]", nodefeature.Swap, func() {
 		switch swapBehavior := getSwapBehavior(); swapBehavior {
 		case types.LimitedSwap:
 			if qosClass != v1.PodQOSBurstable || memoryRequestEqualLimit {
-				expectNoSwap(pod)
+				expectNoSwap(f, pod)
 			} else {
 				expectedSwapLimit := calcSwapForBurstablePod(f, pod)
-				expectLimitedSwap(pod, expectedSwapLimit)
+				expectLimitedSwap(f, pod, expectedSwapLimit)
 			}

 		case types.NoSwap, "":
-			expectNoSwap(pod)
+			expectNoSwap(f, pod)

 		default:
 			gomega.Expect(swapBehavior).To(gomega.Or(gomega.Equal(types.LimitedSwap), gomega.Equal(types.NoSwap)), "unknown swap behavior")
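Note: `calcSwapForBurstablePod` is not shown in this diff. Under the LimitedSwap behavior (KEP-2400), a Burstable container's swap limit is proportional to its share of node memory. A minimal sketch of that arithmetic, with hypothetical parameter names (not the commit's code):

	// Sketch only: node swap is divided among Burstable containers in
	// proportion to their memory requests relative to node memory capacity.
	func swapLimitForBurstable(memRequestBytes, nodeMemBytes, nodeSwapBytes int64) int64 {
		return int64(float64(memRequestBytes) / float64(nodeMemBytes) * float64(nodeSwapBytes))
	}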
@@ -118,17 +116,17 @@ var _ = SIGDescribe("Swap", "[LinuxOnly]", nodefeature.Swap, func() {

 		sleepingPod := getSleepingPod(f.Namespace.Name)
 		sleepingPod = runPodAndWaitUntilScheduled(f, sleepingPod)
-		gomega.Expect(isPodCgroupV2(f, sleepingPod)).To(gomega.BeTrue(), "node uses cgroup v1")
+		gomega.Expect(isPodCgroupV2(f, sleepingPod)).To(gomega.BeTrueBecause("node uses cgroup v1"))

 		nodeName = sleepingPod.Spec.NodeName
 		gomega.Expect(nodeName).ToNot(gomega.BeEmpty(), "node name is empty")

 		nodeTotalMemory, nodeUsedMemory = getMemoryCapacity(f, nodeName)
-		gomega.Expect(nodeTotalMemory.IsZero()).To(gomega.BeFalse(), "node memory capacity is zero")
-		gomega.Expect(nodeUsedMemory.IsZero()).To(gomega.BeFalse(), "node used memory is zero")
+		gomega.Expect(nodeTotalMemory.IsZero()).To(gomega.BeFalseBecause("node memory capacity is zero"))
+		gomega.Expect(nodeUsedMemory.IsZero()).To(gomega.BeFalseBecause("node used memory is zero"))

 		swapCapacity = getSwapCapacity(f, sleepingPod)
-		gomega.Expect(swapCapacity.IsZero()).To(gomega.BeFalse(), "node swap capacity is zero")
+		gomega.Expect(swapCapacity.IsZero()).To(gomega.BeFalseBecause("node swap capacity is zero"))

 		err := podClient.Delete(context.Background(), sleepingPod.Name, metav1.DeleteOptions{})
 		framework.ExpectNoError(err)
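Note: this hunk (and the ones below) switch from `BeTrue()`/`BeFalse()` with a trailing assertion annotation to Gomega's `BeTrueBecause`/`BeFalseBecause`, which fold the reason into the matcher's own failure message. For illustration:

	// Before: the reason is an optional annotation on the assertion.
	gomega.Expect(ok).To(gomega.BeTrue(), "node uses cgroup v1")
	// After: the reason is carried by the matcher itself.
	gomega.Expect(ok).To(gomega.BeTrueBecause("node uses cgroup v1"))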
@@ -170,13 +168,13 @@ var _ = SIGDescribe("Swap", "[LinuxOnly]", nodefeature.Swap, func() {
 		return pod
 	}

-	ginkgo.It("should be able over-commit the node memory", func() {
+	ginkgo.It("should be able to use more than the node memory capacity", func() {
 		stressSize := cloneQuantity(nodeTotalMemory)

 		stressPod := getStressPod(stressSize)
 		// Request will use a lot more swap memory than needed, since we don't test swap limits in this test
 		memRequest := getRequestBySwapLimit(30)
-		setPodMmoryResources(stressPod, memRequest, noLimits)
+		setPodMemoryResources(stressPod, memRequest, noLimits)
 		gomega.Expect(qos.GetPodQOS(stressPod)).To(gomega.Equal(v1.PodQOSBurstable))

 		ginkgo.By(fmt.Sprintf("creating a stress pod with stress size %s and request of %s", stressSize.String(), memRequest.String()))
@@ -189,7 +187,7 @@ var _ = SIGDescribe("Swap", "[LinuxOnly]", nodefeature.Swap, func() {
 			gomega.Expect(stressPod.Status.Phase).To(gomega.Equal(v1.PodRunning), "pod should be running")

 			var err error
-			swapUsage, err = getSwapUsage(stressPod)
+			swapUsage, err = getSwapUsage(f, stressPod)
 			if err != nil {
 				return err
 			}
@@ -215,7 +213,7 @@ var _ = SIGDescribe("Swap", "[LinuxOnly]", nodefeature.Swap, func() {
 		memoryLimit.Sub(resource.MustParse("50Mi"))
 		memoryRequest := divideQuantity(memoryLimit, 2)
 		ginkgo.By("Adding memory request of " + memoryRequest.String() + " and memory limit of " + memoryLimit.String())
-		setPodMmoryResources(stressPod, memoryRequest, memoryLimit)
+		setPodMemoryResources(stressPod, memoryRequest, memoryLimit)
 		gomega.Expect(qos.GetPodQOS(stressPod)).To(gomega.Equal(v1.PodQOSBurstable))

 		var swapUsage, memoryUsage *resource.Quantity
@@ -231,12 +229,12 @@ var _ = SIGDescribe("Swap", "[LinuxOnly]", nodefeature.Swap, func() {
 			gomega.Expect(stressPod.Status.Phase).To(gomega.Equal(v1.PodRunning), "pod should be running")

 			var err error
-			swapUsage, err = getSwapUsage(stressPod)
+			swapUsage, err = getSwapUsage(f, stressPod)
 			if err != nil {
 				return err
 			}

-			memoryUsage, err = getMemoryUsage(stressPod)
+			memoryUsage, err = getMemoryUsage(f, stressPod)
 			if err != nil {
 				return err
 			}
@@ -323,9 +321,9 @@ func getStressPod(f *framework.Framework, stressSize, memAllocSize *resource.Quantity
 			Containers: []v1.Container{
 				{
 					Name:            "stress-container",
-					Image:           "registry.k8s.io/stress:v1",
+					Image:           imageutils.GetE2EImage(imageutils.Agnhost),
 					ImagePullPolicy: v1.PullAlways,
-					Args:            []string{"-mem-alloc-size", memAllocSize.String(), "-mem-alloc-sleep", "500ms", "-mem-total", strconv.Itoa(int(stressSize.Value()))},
+					Args:            []string{"stress", "--mem-alloc-size", memAllocSize.String(), "--mem-alloc-sleep", "1000ms", "--mem-total", strconv.Itoa(int(stressSize.Value()))},
 				},
 			},
 		},
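Note: the one-off `registry.k8s.io/stress:v1` image is replaced by the shared agnhost e2e image, whose `stress` subcommand takes double-dash flags; the allocation interval is also relaxed from 500ms to 1000ms. Per the args above, inside the container this amounts to roughly (sizes illustrative):

	// agnhost stress --mem-alloc-size 100Mi --mem-alloc-sleep 1000ms --mem-total 4294967296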
@@ -349,7 +347,7 @@ func runPodAndWaitUntilScheduled(f *framework.Framework, pod *v1.Pod) *v1.Pod {

 	isReady, err := testutils.PodRunningReady(pod)
 	framework.ExpectNoError(err)
-	gomega.ExpectWithOffset(1, isReady).To(gomega.BeTrue(), "pod should be ready")
+	gomega.ExpectWithOffset(1, isReady).To(gomega.BeTrueBecause("pod should be ready"))

 	return pod
 }
@@ -366,19 +364,19 @@ func isPodCgroupV2(f *framework.Framework, pod *v1.Pod) bool {
 	return output == "true"
 }

-func expectNoSwap(pod *v1.Pod) {
+func expectNoSwap(f *framework.Framework, pod *v1.Pod) {
 	ginkgo.By("expecting no swap")
 	const offset = 1

-	swapLimit, err := readCgroupFile(pod, cgroupV2SwapLimitFile)
+	swapLimit, err := readCgroupFile(f, pod, cgroupV2SwapLimitFile)
 	gomega.ExpectWithOffset(offset, err).ToNot(gomega.HaveOccurred())
 	gomega.ExpectWithOffset(offset, swapLimit).To(gomega.Equal("0"), "max swap allowed should be zero")
 }

 // supports v2 only as v1 shouldn't support LimitedSwap
-func expectLimitedSwap(pod *v1.Pod, expectedSwapLimit int64) {
+func expectLimitedSwap(f *framework.Framework, pod *v1.Pod, expectedSwapLimit int64) {
 	ginkgo.By("expecting limited swap")
-	swapLimitStr, err := readCgroupFile(pod, cgroupV2SwapLimitFile)
+	swapLimitStr, err := readCgroupFile(f, pod, cgroupV2SwapLimitFile)
 	framework.ExpectNoError(err)

 	swapLimit, err := strconv.Atoi(swapLimitStr)
@@ -467,49 +465,11 @@ func multiplyQuantity(quantity *resource.Quantity, multiplier int64) *resource.Quantity
 	return resource.NewQuantity(product.Int64(), quantity.Format)
 }

-func multiplyQuantities(quantity1, quantity2 *resource.Quantity) *resource.Quantity {
-	product := new(big.Int).Mul(quantity1.AsDec().UnscaledBig(), quantity2.AsDec().UnscaledBig())
-
-	return resource.NewQuantity(product.Int64(), quantity1.Format)
-}
-
-func getPodCgroupPath(pod *v1.Pod) string {
-	podQos := qos.GetPodQOS(pod)
-	cgroupQosComponent := ""
-
-	switch podQos {
-	case v1.PodQOSBestEffort:
-		cgroupQosComponent = bestEffortCgroup
-	case v1.PodQOSBurstable:
-		cgroupQosComponent = burstableCgroup
-	}
-
-	var rootCgroupName cm.CgroupName
-	if cgroupQosComponent != "" {
-		rootCgroupName = cm.NewCgroupName(cm.RootCgroupName, defaultNodeAllocatableCgroup, cgroupQosComponent)
-	} else {
-		rootCgroupName = cm.NewCgroupName(cm.RootCgroupName, defaultNodeAllocatableCgroup)
-	}
-
-	cgroupsToVerify := "pod" + string(pod.UID)
-	cgroupName := cm.NewCgroupName(rootCgroupName, cgroupsToVerify)
-	cgroupFsPath := toCgroupFsName(cgroupName)
-
-	return filepath.Join(cgroupBasePath, cgroupFsPath)
-}
-
-func readCgroupFile(pod *v1.Pod, cgroupFile string) (string, error) {
-	cgroupPath := getPodCgroupPath(pod)
-	cgroupFilePath := filepath.Join(cgroupPath, cgroupFile)
-
-	ginkgo.By("Reading cgroup file: " + cgroupFilePath)
-	cmd := "cat " + cgroupFilePath
-	outputBytes, err := exec.Command("sudo", "sh", "-c", cmd).CombinedOutput()
-	if err != nil {
-		return "", fmt.Errorf("error running cmd %s: %w", cmd, err)
-	}
-
-	outputStr := strings.TrimSpace(string(outputBytes))
+func readCgroupFile(f *framework.Framework, pod *v1.Pod, cgroupFile string) (string, error) {
+	cgroupPath := filepath.Join(cgroupBasePath, cgroupFile)
+
+	outputStr := e2epod.ExecCommandInContainer(f, pod.Name, pod.Spec.Containers[0].Name, "sh", "-c", "cat "+cgroupPath)
+	outputStr = strings.TrimSpace(outputStr)
 	ginkgo.By("cgroup found value: " + outputStr)

 	return outputStr, nil
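Note: reading the cgroup file by exec'ing `cat` inside the container replaces the host-side `sudo` read, which is why the QoS-aware `getPodCgroupPath` helper can be deleted along with the `cm` and `os/exec` imports (the unused `multiplyQuantities` goes too). On cgroup v2 with cgroup namespaces, `/sys/fs/cgroup` inside the container refers to the container's own cgroup, so a file name relative to `cgroupBasePath` is enough. A usage sketch, assuming `cgroupV2SwapLimitFile` names the `memory.swap.max` file:

	// Sketch: read the container's swap limit from inside the pod.
	swapLimitStr, err := readCgroupFile(f, pod, cgroupV2SwapLimitFile)
	framework.ExpectNoError(err)
	ginkgo.By("container swap limit: " + swapLimitStr)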
@@ -524,8 +484,8 @@ func parseBytesStrToQuantity(bytesStr string) (*resource.Quantity, error) {
 	return resource.NewQuantity(bytesInt, resource.BinarySI), nil
 }

-func getSwapUsage(pod *v1.Pod) (*resource.Quantity, error) {
-	outputStr, err := readCgroupFile(pod, cgroupV2swapCurrentUsageFile)
+func getSwapUsage(f *framework.Framework, pod *v1.Pod) (*resource.Quantity, error) {
+	outputStr, err := readCgroupFile(f, pod, cgroupV2swapCurrentUsageFile)
 	if err != nil {
 		return nil, err
 	}
@@ -535,8 +495,8 @@ func getSwapUsage(pod *v1.Pod) (*resource.Quantity, error) {
 	return parseBytesStrToQuantity(outputStr)
 }

-func getMemoryUsage(pod *v1.Pod) (*resource.Quantity, error) {
-	outputStr, err := readCgroupFile(pod, cgroupV2MemoryCurrentUsageFile)
+func getMemoryUsage(f *framework.Framework, pod *v1.Pod) (*resource.Quantity, error) {
+	outputStr, err := readCgroupFile(f, pod, cgroupV2MemoryCurrentUsageFile)
 	if err != nil {
 		return nil, err
 	}
@@ -548,7 +508,7 @@ func getMemoryUsage(pod *v1.Pod) (*resource.Quantity, error) {

 // Sets memory request and limit; either can be nil, in which case it is dismissed.
 // Sets the same value for all containers.
-func setPodMmoryResources(pod *v1.Pod, memoryRequest, memoryLimit *resource.Quantity) {
+func setPodMemoryResources(pod *v1.Pod, memoryRequest, memoryLimit *resource.Quantity) {
 	for i := range pod.Spec.Containers {
 		resources := &pod.Spec.Containers[i].Resources
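Note: the diff is cut off inside `setPodMemoryResources`. A minimal sketch of how the loop body plausibly continues, given the nil-means-skip semantics described in its comment (illustrative, not the commit's code):

	for i := range pod.Spec.Containers {
		resources := &pod.Spec.Containers[i].Resources
		if memoryRequest != nil {
			if resources.Requests == nil {
				resources.Requests = v1.ResourceList{}
			}
			resources.Requests[v1.ResourceMemory] = *memoryRequest
		}
		if memoryLimit != nil {
			if resources.Limits == nil {
				resources.Limits = v1.ResourceList{}
			}
			resources.Limits[v1.ResourceMemory] = *memoryLimit
		}
	}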