Commit ab5f84e
Refactor: Better pod image, read cgroup file from container
- Improve cgroup file read: execute from container instead of host
- Clean unused variables/functions
- BeTrue/BeFalse -> BeTrueBecause/BeFalseBecause
- Use agnhost instead of stress image
- Improve description and fix typo

Signed-off-by: Itamar Holder <[email protected]>
1 parent b170509 commit ab5f84e

test/e2e_node/swap_test.go

Lines changed: 31 additions & 71 deletions
@@ -21,11 +21,10 @@ import (
 	"fmt"
 	"k8s.io/kubernetes/pkg/apis/core/v1/helper/qos"
 	"k8s.io/kubernetes/pkg/kubelet/apis/config"
-	"k8s.io/kubernetes/pkg/kubelet/cm"
 	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
 	"k8s.io/kubernetes/test/e2e/nodefeature"
+	imageutils "k8s.io/kubernetes/test/utils/image"
 	"math/big"
-	"os/exec"
 	"path/filepath"
 	"strconv"
 	"strings"
@@ -53,8 +52,7 @@ const (
 )
 
 var (
-	noRequests *resource.Quantity = nil
-	noLimits   *resource.Quantity = nil
+	noLimits *resource.Quantity = nil
 )
 
 var _ = SIGDescribe("Swap", "[LinuxOnly]", nodefeature.Swap, func() {
@@ -77,14 +75,14 @@ var _ = SIGDescribe("Swap", "[LinuxOnly]", nodefeature.Swap, func() {
 			switch swapBehavior := getSwapBehavior(); swapBehavior {
 			case types.LimitedSwap:
 				if qosClass != v1.PodQOSBurstable || memoryRequestEqualLimit {
-					expectNoSwap(pod)
+					expectNoSwap(f, pod)
 				} else {
 					expectedSwapLimit := calcSwapForBurstablePod(f, pod)
-					expectLimitedSwap(pod, expectedSwapLimit)
+					expectLimitedSwap(f, pod, expectedSwapLimit)
 				}
 
 			case types.NoSwap, "":
-				expectNoSwap(pod)
+				expectNoSwap(f, pod)
 
 			default:
 				gomega.Expect(swapBehavior).To(gomega.Or(gomega.Equal(types.LimitedSwap), gomega.Equal(types.NoSwap)), "unknown swap behavior")
@@ -118,17 +116,17 @@ var _ = SIGDescribe("Swap", "[LinuxOnly]", nodefeature.Swap, func() {
 
 		sleepingPod := getSleepingPod(f.Namespace.Name)
 		sleepingPod = runPodAndWaitUntilScheduled(f, sleepingPod)
-		gomega.Expect(isPodCgroupV2(f, sleepingPod)).To(gomega.BeTrue(), "node uses cgroup v1")
+		gomega.Expect(isPodCgroupV2(f, sleepingPod)).To(gomega.BeTrueBecause("node uses cgroup v1"))
 
 		nodeName = sleepingPod.Spec.NodeName
 		gomega.Expect(nodeName).ToNot(gomega.BeEmpty(), "node name is empty")
 
 		nodeTotalMemory, nodeUsedMemory = getMemoryCapacity(f, nodeName)
-		gomega.Expect(nodeTotalMemory.IsZero()).To(gomega.BeFalse(), "node memory capacity is zero")
-		gomega.Expect(nodeUsedMemory.IsZero()).To(gomega.BeFalse(), "node used memory is zero")
+		gomega.Expect(nodeTotalMemory.IsZero()).To(gomega.BeFalseBecause("node memory capacity is zero"))
+		gomega.Expect(nodeUsedMemory.IsZero()).To(gomega.BeFalseBecause("node used memory is zero"))
 
 		swapCapacity = getSwapCapacity(f, sleepingPod)
-		gomega.Expect(swapCapacity.IsZero()).To(gomega.BeFalse(), "node swap capacity is zero")
+		gomega.Expect(swapCapacity.IsZero()).To(gomega.BeFalseBecause("node swap capacity is zero"))
 
 		err := podClient.Delete(context.Background(), sleepingPod.Name, metav1.DeleteOptions{})
 		framework.ExpectNoError(err)
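The matcher swap in this hunk is more than cosmetic: with `BeTrue()`, the trailing string is an annotation bolted onto a generic "Expected true" failure, while `BeTrueBecause`/`BeFalseBecause` make the explanation the matcher's own failure message and accept printf-style arguments. A minimal hedged sketch, assuming a Gomega release that ships these matchers (v1.29+) and the `f`/`sleepingPod` variables from the surrounding test:

```go
// Before: annotation string alongside a generic boolean matcher.
gomega.Expect(isPodCgroupV2(f, sleepingPod)).To(gomega.BeTrue(), "node uses cgroup v1")

// After: the reason is the matcher's failure message, with optional format args.
gomega.Expect(isPodCgroupV2(f, sleepingPod)).To(
	gomega.BeTrueBecause("pod %s should land on a cgroup v2 node", sleepingPod.Name))
```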
@@ -170,13 +168,13 @@ var _ = SIGDescribe("Swap", "[LinuxOnly]", nodefeature.Swap, func() {
 			return pod
 		}
 
-		ginkgo.It("should be able over-commit the node memory", func() {
+		ginkgo.It("should be able to use more than the node memory capacity", func() {
 			stressSize := cloneQuantity(nodeTotalMemory)
 
 			stressPod := getStressPod(stressSize)
 			// Request will use a lot more swap memory than needed, since we don't test swap limits in this test
 			memRequest := getRequestBySwapLimit(30)
-			setPodMmoryResources(stressPod, memRequest, noLimits)
+			setPodMemoryResources(stressPod, memRequest, noLimits)
 			gomega.Expect(qos.GetPodQOS(stressPod)).To(gomega.Equal(v1.PodQOSBurstable))
 
 			ginkgo.By(fmt.Sprintf("creating a stress pod with stress size %s and request of %s", stressSize.String(), memRequest.String()))
@@ -189,7 +187,7 @@ var _ = SIGDescribe("Swap", "[LinuxOnly]", nodefeature.Swap, func() {
 				gomega.Expect(stressPod.Status.Phase).To(gomega.Equal(v1.PodRunning), "pod should be running")
 
 				var err error
-				swapUsage, err = getSwapUsage(stressPod)
+				swapUsage, err = getSwapUsage(f, stressPod)
 				if err != nil {
 					return err
 				}
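For orientation, this assignment lives inside a closure returning `error` that the test polls until swap usage shows up; the enclosing call sits outside the hunk, so the sketch of that polling shape below is an assumption (the refresh helper, intervals, and zero-usage check are illustrative, not from the commit):

```go
// Hedged sketch of the polling pattern around this hunk; getUpdatedPod is a
// hypothetical refresh helper, and the intervals are made-up values.
gomega.Eventually(func() error {
	stressPod = getUpdatedPod(f, stressPod)
	gomega.Expect(stressPod.Status.Phase).To(gomega.Equal(v1.PodRunning), "pod should be running")

	var err error
	swapUsage, err = getSwapUsage(f, stressPod)
	if err != nil {
		return err
	}
	if swapUsage.IsZero() {
		return fmt.Errorf("swap usage is still zero")
	}
	return nil
}, 5*time.Minute, 5*time.Second).Should(gomega.Succeed())
```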
@@ -215,7 +213,7 @@ var _ = SIGDescribe("Swap", "[LinuxOnly]", nodefeature.Swap, func() {
 			memoryLimit.Sub(resource.MustParse("50Mi"))
 			memoryRequest := divideQuantity(memoryLimit, 2)
 			ginkgo.By("Adding memory request of " + memoryRequest.String() + " and memory limit of " + memoryLimit.String())
-			setPodMmoryResources(stressPod, memoryRequest, memoryLimit)
+			setPodMemoryResources(stressPod, memoryRequest, memoryLimit)
 			gomega.Expect(qos.GetPodQOS(stressPod)).To(gomega.Equal(v1.PodQOSBurstable))
 
 			var swapUsage, memoryUsage *resource.Quantity
@@ -231,12 +229,12 @@ var _ = SIGDescribe("Swap", "[LinuxOnly]", nodefeature.Swap, func() {
 				gomega.Expect(stressPod.Status.Phase).To(gomega.Equal(v1.PodRunning), "pod should be running")
 
 				var err error
-				swapUsage, err = getSwapUsage(stressPod)
+				swapUsage, err = getSwapUsage(f, stressPod)
 				if err != nil {
 					return err
 				}
 
-				memoryUsage, err = getMemoryUsage(stressPod)
+				memoryUsage, err = getMemoryUsage(f, stressPod)
 				if err != nil {
 					return err
 				}
@@ -323,9 +321,9 @@ func getStressPod(f *framework.Framework, stressSize, memAllocSize *resource.Qua
 		Containers: []v1.Container{
 			{
 				Name: "stress-container",
-				Image: "registry.k8s.io/stress:v1",
+				Image: imageutils.GetE2EImage(imageutils.Agnhost),
 				ImagePullPolicy: v1.PullAlways,
-				Args: []string{"-mem-alloc-size", memAllocSize.String(), "-mem-alloc-sleep", "500ms", "-mem-total", strconv.Itoa(int(stressSize.Value()))},
+				Args: []string{"stress", "--mem-alloc-size", memAllocSize.String(), "--mem-alloc-sleep", "1000ms", "--mem-total", strconv.Itoa(int(stressSize.Value()))},
 			},
 		},
 	},
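Since agnhost is a multi-purpose test binary, the subcommand name leads the argument list and the stress flags switch to double dashes. A hedged sketch of how the `--mem-total` value is derived; the `strconv.Itoa(int(...))` flattening mirrors the hunk above, while the concrete quantities are illustrative:

```go
// A resource.Quantity is flattened to a plain byte count before being passed
// as a string flag; the values here are illustrative only.
stressSize := resource.MustParse("1Gi")
memTotal := strconv.Itoa(int(stressSize.Value())) // "1073741824"
args := []string{"stress", "--mem-alloc-size", "100Mi", "--mem-alloc-sleep", "1000ms", "--mem-total", memTotal}
// Resulting container command line:
//   agnhost stress --mem-alloc-size 100Mi --mem-alloc-sleep 1000ms --mem-total 1073741824
_ = args
```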
@@ -349,7 +347,7 @@ func runPodAndWaitUntilScheduled(f *framework.Framework, pod *v1.Pod) *v1.Pod {
 
 	isReady, err := testutils.PodRunningReady(pod)
 	framework.ExpectNoError(err)
-	gomega.ExpectWithOffset(1, isReady).To(gomega.BeTrue(), "pod should be ready")
+	gomega.ExpectWithOffset(1, isReady).To(gomega.BeTrueBecause("pod should be ready"))
 
 	return pod
 }
@@ -366,19 +364,19 @@ func isPodCgroupV2(f *framework.Framework, pod *v1.Pod) bool {
 	return output == "true"
 }
 
-func expectNoSwap(pod *v1.Pod) {
+func expectNoSwap(f *framework.Framework, pod *v1.Pod) {
 	ginkgo.By("expecting no swap")
 	const offest = 1
 
-	swapLimit, err := readCgroupFile(pod, cgroupV2SwapLimitFile)
+	swapLimit, err := readCgroupFile(f, pod, cgroupV2SwapLimitFile)
 	gomega.ExpectWithOffset(offest, err).ToNot(gomega.HaveOccurred())
 	gomega.ExpectWithOffset(offest, swapLimit).To(gomega.Equal("0"), "max swap allowed should be zero")
 }
 
 // supports v2 only as v1 shouldn't support LimitedSwap
-func expectLimitedSwap(pod *v1.Pod, expectedSwapLimit int64) {
+func expectLimitedSwap(f *framework.Framework, pod *v1.Pod, expectedSwapLimit int64) {
 	ginkgo.By("expecting limited swap")
-	swapLimitStr, err := readCgroupFile(pod, cgroupV2SwapLimitFile)
+	swapLimitStr, err := readCgroupFile(f, pod, cgroupV2SwapLimitFile)
 	framework.ExpectNoError(err)
 
 	swapLimit, err := strconv.Atoi(swapLimitStr)
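Both helpers read the pod's cgroup v2 swap limit file. On cgroup v2, `memory.swap.max` holds either the literal string `max` (no limit) or a byte count, with `0` meaning swap is disabled, which is why `expectNoSwap` compares against the string "0" while `expectLimitedSwap` parses an integer. A small sketch of that interpretation, assuming the standard `strings`/`strconv` imports; the helper name is illustrative:

```go
// parseSwapMax interprets a memory.swap.max value on cgroup v2:
// "max" means unlimited, "0" means swap disabled, anything else is bytes.
func parseSwapMax(raw string) (limitBytes int64, unlimited bool, err error) {
	raw = strings.TrimSpace(raw)
	if raw == "max" {
		return 0, true, nil
	}
	limitBytes, err = strconv.ParseInt(raw, 10, 64)
	return limitBytes, false, err
}
```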
@@ -467,49 +465,11 @@ func multiplyQuantity(quantity *resource.Quantity, multiplier int64) *resource.Q
 	return resource.NewQuantity(product.Int64(), quantity.Format)
 }
 
-func multiplyQuantities(quantity1, quantity2 *resource.Quantity) *resource.Quantity {
-	product := new(big.Int).Mul(quantity1.AsDec().UnscaledBig(), quantity2.AsDec().UnscaledBig())
+func readCgroupFile(f *framework.Framework, pod *v1.Pod, cgroupFile string) (string, error) {
+	cgroupPath := filepath.Join(cgroupBasePath, cgroupFile)
 
-	return resource.NewQuantity(product.Int64(), quantity1.Format)
-}
-
-func getPodCgroupPath(pod *v1.Pod) string {
-	podQos := qos.GetPodQOS(pod)
-	cgroupQosComponent := ""
-
-	switch podQos {
-	case v1.PodQOSBestEffort:
-		cgroupQosComponent = bestEffortCgroup
-	case v1.PodQOSBurstable:
-		cgroupQosComponent = burstableCgroup
-	}
-
-	var rootCgroupName cm.CgroupName
-	if cgroupQosComponent != "" {
-		rootCgroupName = cm.NewCgroupName(cm.RootCgroupName, defaultNodeAllocatableCgroup, cgroupQosComponent)
-	} else {
-		rootCgroupName = cm.NewCgroupName(cm.RootCgroupName, defaultNodeAllocatableCgroup)
-	}
-
-	cgroupsToVerify := "pod" + string(pod.UID)
-	cgroupName := cm.NewCgroupName(rootCgroupName, cgroupsToVerify)
-	cgroupFsPath := toCgroupFsName(cgroupName)
-
-	return filepath.Join(cgroupBasePath, cgroupFsPath)
-}
-
-func readCgroupFile(pod *v1.Pod, cgroupFile string) (string, error) {
-	cgroupPath := getPodCgroupPath(pod)
-	cgroupFilePath := filepath.Join(cgroupPath, cgroupFile)
-
-	ginkgo.By("Reading cgroup file: " + cgroupFilePath)
-	cmd := "cat " + cgroupFilePath
-	outputBytes, err := exec.Command("sudo", "sh", "-c", cmd).CombinedOutput()
-	if err != nil {
-		return "", fmt.Errorf("error running cmd %s: %w", cmd, err)
-	}
-
-	outputStr := strings.TrimSpace(string(outputBytes))
+	outputStr := e2epod.ExecCommandInContainer(f, pod.Name, pod.Spec.Containers[0].Name, "sh", "-c", "cat "+cgroupPath)
+	outputStr = strings.TrimSpace(outputStr)
 	ginkgo.By("cgroup found value: " + outputStr)
 
 	return outputStr, nil
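The rewrite works because, on cgroup v2 with cgroup namespaces, the runtime mounts the container's own cgroup subtree at the base path, so a `cat` exec'd inside the container reads the pod-scoped file directly; the host-side path reconstruction deleted above becomes unnecessary. A hedged usage sketch contrasting the two, with a hypothetical systemd-driver host path for a burstable pod and `f`/`pod` assumed in scope:

```go
// Old approach, resolved on the e2e host (path shape is illustrative):
//   /sys/fs/cgroup/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod<uid>.slice/memory.swap.max
//
// New approach, resolved from inside the container, where cgroupBasePath
// (assumed "/sys/fs/cgroup") is already the container's own subtree:
swapLimit, err := readCgroupFile(f, pod, cgroupV2SwapLimitFile)
framework.ExpectNoError(err)
ginkgo.By("pod swap limit (bytes, \"0\", or \"max\"): " + swapLimit)
```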
@@ -524,8 +484,8 @@ func parseBytesStrToQuantity(bytesStr string) (*resource.Quantity, error) {
 	return resource.NewQuantity(bytesInt, resource.BinarySI), nil
 }
 
-func getSwapUsage(pod *v1.Pod) (*resource.Quantity, error) {
-	outputStr, err := readCgroupFile(pod, cgroupV2swapCurrentUsageFile)
+func getSwapUsage(f *framework.Framework, pod *v1.Pod) (*resource.Quantity, error) {
+	outputStr, err := readCgroupFile(f, pod, cgroupV2swapCurrentUsageFile)
 	if err != nil {
 		return nil, err
 	}
@@ -535,8 +495,8 @@ func getSwapUsage(pod *v1.Pod) (*resource.Quantity, error) {
 	return parseBytesStrToQuantity(outputStr)
 }
 
-func getMemoryUsage(pod *v1.Pod) (*resource.Quantity, error) {
-	outputStr, err := readCgroupFile(pod, cgroupV2MemoryCurrentUsageFile)
+func getMemoryUsage(f *framework.Framework, pod *v1.Pod) (*resource.Quantity, error) {
+	outputStr, err := readCgroupFile(f, pod, cgroupV2MemoryCurrentUsageFile)
 	if err != nil {
 		return nil, err
 	}
@@ -548,7 +508,7 @@ func getMemoryUsage(pod *v1.Pod) (*resource.Quantity, error) {
 
 // Sets memory request or limit can be null, then it's dismissed.
 // Sets the same value for all containers.
-func setPodMmoryResources(pod *v1.Pod, memoryRequest, memoryLimit *resource.Quantity) {
+func setPodMemoryResources(pod *v1.Pod, memoryRequest, memoryLimit *resource.Quantity) {
 	for i := range pod.Spec.Containers {
 		resources := &pod.Spec.Containers[i].Resources
 
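The hunk cuts off right after the loop header, so the body below is a hedged completion consistent with the function's comment that a nil request or limit is simply skipped; the actual implementation is not part of this diff:

```go
// Hypothetical remainder of setPodMemoryResources; only the two lines above
// appear in the hunk, so this is an assumption about the rest of the loop.
if memoryRequest != nil {
	if resources.Requests == nil {
		resources.Requests = v1.ResourceList{}
	}
	resources.Requests[v1.ResourceMemory] = *memoryRequest
}
if memoryLimit != nil {
	if resources.Limits == nil {
		resources.Limits = v1.ResourceList{}
	}
	resources.Limits[v1.ResourceMemory] = *memoryLimit
}
```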