Commit b8897e6

test: refactor duplicate IPPR e2e tests.
This change refactors the duplicate IPPR (in-place pod resize) cluster and node e2e tests under the test/e2e/common directory.
1 parent 6203006 commit b8897e6

2 files changed: +94, -1107 lines
test/e2e_node/pod_resize_test.go renamed to test/e2e/common/node/pod_resize.go

Lines changed: 89 additions & 87 deletions
@@ -14,10 +14,11 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package e2enode
+package node
 
 import (
 	"context"
+	"encoding/json"
 	"fmt"
 	"regexp"
 	"strconv"
@@ -32,14 +33,16 @@ import (
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/util/strategicpatch"
 	clientset "k8s.io/client-go/kubernetes"
+
 	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
 	kubecm "k8s.io/kubernetes/pkg/kubelet/cm"
 	"k8s.io/kubernetes/test/e2e/feature"
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
-	testutils "k8s.io/kubernetes/test/utils"
 	imageutils "k8s.io/kubernetes/test/utils/image"
 )
 
@@ -59,10 +62,7 @@ const (
 )
 
 var (
-	podOnCgroupv2Node bool = IsCgroup2UnifiedMode()
-	cgroupMemLimit string = Cgroupv2MemLimit
-	cgroupCPULimit string = Cgroupv2CPULimit
-	cgroupCPURequest string = Cgroupv2CPURequest
+	podOnCgroupv2Node *bool
 )
 
 type ContainerResources struct {
@@ -114,16 +114,19 @@ type patchSpec struct {
 	} `json:"spec"`
 }
 
-func supportsInPlacePodVerticalScaling(ctx context.Context, f *framework.Framework) bool {
-	node := getLocalNode(ctx, f)
+func isInPlacePodVerticalScalingSupportedByRuntime(ctx context.Context, c clientset.Interface) bool {
+	node, err := e2enode.GetRandomReadySchedulableNode(ctx, c)
+	framework.ExpectNoError(err)
 	re := regexp.MustCompile("containerd://(.*)")
 	match := re.FindStringSubmatch(node.Status.NodeInfo.ContainerRuntimeVersion)
 	if len(match) != 2 {
 		return false
 	}
-	// TODO(InPlacePodVerticalScaling): Update when RuntimeHandlerFeature for pod resize have been implemented
 	if ver, verr := semver.ParseTolerant(match[1]); verr == nil {
-		return ver.Compare(semver.MustParse(MinContainerRuntimeVersion)) >= 0
+		if ver.Compare(semver.MustParse(MinContainerRuntimeVersion)) < 0 {
+			return false
+		}
+		return true
 	}
 	return false
 }
@@ -222,15 +225,11 @@ func makeTestContainer(tcInfo TestContainerInfo) (v1.Container, v1.ContainerStat
 
 func makeTestPod(ns, name, timeStamp string, tcInfo []TestContainerInfo) *v1.Pod {
 	var testContainers []v1.Container
-	var podOS *v1.PodOS
 
 	for _, ci := range tcInfo {
 		tc, _ := makeTestContainer(ci)
 		testContainers = append(testContainers, tc)
 	}
-
-	podOS = &v1.PodOS{Name: v1.Linux}
-
 	pod := &v1.Pod{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: name,
@@ -240,97 +239,103 @@ func makeTestPod(ns, name, timeStamp string, tcInfo []TestContainerInfo) *v1.Pod
 			},
 		},
 		Spec: v1.PodSpec{
-			OS: podOS,
+			OS: &v1.PodOS{Name: v1.Linux},
 			Containers: testContainers,
 			RestartPolicy: v1.RestartPolicyOnFailure,
 		},
 	}
 	return pod
 }
 
-func verifyPodResizePolicy(pod *v1.Pod, tcInfo []TestContainerInfo) {
+func verifyPodResizePolicy(gotPod *v1.Pod, wantCtrs []TestContainerInfo) {
 	ginkgo.GinkgoHelper()
-	cMap := make(map[string]*v1.Container)
-	for i, c := range pod.Spec.Containers {
-		cMap[c.Name] = &pod.Spec.Containers[i]
-	}
-	for _, ci := range tcInfo {
-		gomega.Expect(cMap).Should(gomega.HaveKey(ci.Name))
-		c := cMap[ci.Name]
-		tc, _ := makeTestContainer(ci)
-		gomega.Expect(tc.ResizePolicy).To(gomega.Equal(c.ResizePolicy))
+	for i, wantCtr := range wantCtrs {
+		gotCtr := &gotPod.Spec.Containers[i]
+		ctr, _ := makeTestContainer(wantCtr)
+		gomega.Expect(gotCtr.Name).To(gomega.Equal(ctr.Name))
+		gomega.Expect(gotCtr.ResizePolicy).To(gomega.Equal(ctr.ResizePolicy))
 	}
 }
 
-func verifyPodResources(pod *v1.Pod, tcInfo []TestContainerInfo) {
+func verifyPodResources(gotPod *v1.Pod, wantCtrs []TestContainerInfo) {
 	ginkgo.GinkgoHelper()
-	cMap := make(map[string]*v1.Container)
-	for i, c := range pod.Spec.Containers {
-		cMap[c.Name] = &pod.Spec.Containers[i]
-	}
-	for _, ci := range tcInfo {
-		gomega.Expect(cMap).Should(gomega.HaveKey(ci.Name))
-		c := cMap[ci.Name]
-		tc, _ := makeTestContainer(ci)
-		gomega.Expect(tc.Resources).To(gomega.Equal(c.Resources))
+	for i, wantCtr := range wantCtrs {
+		gotCtr := &gotPod.Spec.Containers[i]
+		ctr, _ := makeTestContainer(wantCtr)
+		gomega.Expect(gotCtr.Name).To(gomega.Equal(ctr.Name))
+		gomega.Expect(gotCtr.Resources).To(gomega.Equal(ctr.Resources))
 	}
 }
 
-func verifyPodAllocations(pod *v1.Pod, tcInfo []TestContainerInfo) error {
+func verifyPodAllocations(gotPod *v1.Pod, wantCtrs []TestContainerInfo) error {
 	ginkgo.GinkgoHelper()
-	cStatusMap := make(map[string]*v1.ContainerStatus)
-	for i, c := range pod.Status.ContainerStatuses {
-		cStatusMap[c.Name] = &pod.Status.ContainerStatuses[i]
-	}
-
-	for _, ci := range tcInfo {
-		gomega.Expect(cStatusMap).Should(gomega.HaveKey(ci.Name))
-		cStatus := cStatusMap[ci.Name]
-		if ci.Allocations == nil {
-			if ci.Resources != nil {
-				alloc := &ContainerAllocations{CPUAlloc: ci.Resources.CPUReq, MemAlloc: ci.Resources.MemReq}
-				ci.Allocations = alloc
+	for i, wantCtr := range wantCtrs {
+		gotCtrStatus := &gotPod.Status.ContainerStatuses[i]
+		if wantCtr.Allocations == nil {
+			if wantCtr.Resources != nil {
+				alloc := &ContainerAllocations{CPUAlloc: wantCtr.Resources.CPUReq, MemAlloc: wantCtr.Resources.MemReq}
+				wantCtr.Allocations = alloc
 				defer func() {
-					ci.Allocations = nil
+					wantCtr.Allocations = nil
 				}()
 			}
 		}
 
-		_, tcStatus := makeTestContainer(ci)
-		if !cmp.Equal(cStatus.AllocatedResources, tcStatus.AllocatedResources) {
+		_, ctrStatus := makeTestContainer(wantCtr)
+		gomega.Expect(gotCtrStatus.Name).To(gomega.Equal(ctrStatus.Name))
+		if !cmp.Equal(gotCtrStatus.AllocatedResources, ctrStatus.AllocatedResources) {
			return fmt.Errorf("failed to verify Pod allocations, allocated resources not equal to expected")
		}
 	}
 	return nil
 }
 
-func verifyPodStatusResources(pod *v1.Pod, tcInfo []TestContainerInfo) {
+func verifyPodStatusResources(gotPod *v1.Pod, wantCtrs []TestContainerInfo) {
 	ginkgo.GinkgoHelper()
-	csMap := make(map[string]*v1.ContainerStatus)
-	for i, c := range pod.Status.ContainerStatuses {
-		csMap[c.Name] = &pod.Status.ContainerStatuses[i]
+	for i, wantCtr := range wantCtrs {
+		gotCtrStatus := &gotPod.Status.ContainerStatuses[i]
+		ctr, _ := makeTestContainer(wantCtr)
+		gomega.Expect(gotCtrStatus.Name).To(gomega.Equal(ctr.Name))
+		gomega.Expect(ctr.Resources).To(gomega.Equal(*gotCtrStatus.Resources))
 	}
-	for _, ci := range tcInfo {
-		gomega.Expect(csMap).Should(gomega.HaveKey(ci.Name))
-		cs := csMap[ci.Name]
-		tc, _ := makeTestContainer(ci)
-		gomega.Expect(tc.Resources).To(gomega.Equal(*cs.Resources))
+}
+
+func isPodOnCgroupv2Node(f *framework.Framework, pod *v1.Pod) bool {
+	// Determine if pod is running on cgroupv2 or cgroupv1 node
+	//TODO(vinaykul,InPlacePodVerticalScaling): Is there a better way to determine this?
+	cmd := "mount -t cgroup2"
+	out, _, err := e2epod.ExecCommandInContainerWithFullOutput(f, pod.Name, pod.Spec.Containers[0].Name, "/bin/sh", "-c", cmd)
+	if err != nil {
+		return false
 	}
+	return len(out) != 0
 }
 
 func verifyPodContainersCgroupValues(ctx context.Context, f *framework.Framework, pod *v1.Pod, tcInfo []TestContainerInfo) error {
 	ginkgo.GinkgoHelper()
+	if podOnCgroupv2Node == nil {
+		value := isPodOnCgroupv2Node(f, pod)
+		podOnCgroupv2Node = &value
+	}
+	cgroupMemLimit := Cgroupv2MemLimit
+	cgroupCPULimit := Cgroupv2CPULimit
+	cgroupCPURequest := Cgroupv2CPURequest
+	if !*podOnCgroupv2Node {
+		cgroupMemLimit = CgroupMemLimit
+		cgroupCPULimit = CgroupCPUQuota
+		cgroupCPURequest = CgroupCPUShares
+	}
 	verifyCgroupValue := func(cName, cgPath, expectedCgValue string) error {
-		mycmd := fmt.Sprintf("head -n 1 %s", cgPath)
-		cgValue, _, err := e2epod.ExecCommandInContainerWithFullOutput(f, pod.Name, cName, "/bin/sh", "-c", mycmd)
+		cmd := fmt.Sprintf("head -n 1 %s", cgPath)
 		framework.Logf("Namespace %s Pod %s Container %s - looking for cgroup value %s in path %s",
 			pod.Namespace, pod.Name, cName, expectedCgValue, cgPath)
+		cgValue, _, err := e2epod.ExecCommandInContainerWithFullOutput(f, pod.Name, cName, "/bin/sh", "-c", cmd)
 		if err != nil {
-			return fmt.Errorf("failed to find expected value '%s' in container cgroup '%s'", expectedCgValue, cgPath)
+			return fmt.Errorf("failed to find expected value %q in container cgroup %q", expectedCgValue, cgPath)
 		}
 		cgValue = strings.Trim(cgValue, "\n")
 		if cgValue != expectedCgValue {
-			return fmt.Errorf("cgroup value '%s' not equal to expected '%s'", cgValue, expectedCgValue)
+			return fmt.Errorf("cgroup value %q not equal to expected %q", cgValue, expectedCgValue)
 		}
 		return nil
 	}
@@ -356,7 +361,7 @@ func verifyPodContainersCgroupValues(ctx context.Context, f *framework.Framework
 		}
 		expectedCPULimitString = strconv.FormatInt(cpuQuota, 10)
 		expectedMemLimitString = strconv.FormatInt(expectedMemLimitInBytes, 10)
-		if podOnCgroupv2Node {
+		if *podOnCgroupv2Node {
 			if expectedCPULimitString == "-1" {
 				expectedCPULimitString = "max"
 			}
@@ -387,17 +392,25 @@ func verifyPodContainersCgroupValues(ctx context.Context, f *framework.Framework
 	return nil
 }
 
-func waitForContainerRestart(ctx context.Context, f *framework.Framework, podClient *e2epod.PodClient, pod *v1.Pod, expectedContainers []TestContainerInfo) error {
+func waitForContainerRestart(ctx context.Context, podClient *e2epod.PodClient, pod *v1.Pod, expectedContainers []TestContainerInfo, initialContainers []TestContainerInfo, isRollback bool) error {
 	ginkgo.GinkgoHelper()
 	var restartContainersExpected []string
-	for _, ci := range expectedContainers {
+
+	restartContainers := expectedContainers
+	// if we're rolling back, extract restart counts from test case "expected" containers
+	if isRollback {
+		restartContainers = initialContainers
+	}
+
+	for _, ci := range restartContainers {
 		if ci.RestartCount > 0 {
 			restartContainersExpected = append(restartContainersExpected, ci.Name)
 		}
 	}
 	if len(restartContainersExpected) == 0 {
 		return nil
 	}
+
 	pod, err := podClient.Get(ctx, pod.Name, metav1.GetOptions{})
 	if err != nil {
 		return err
@@ -420,14 +433,14 @@ func waitForContainerRestart(ctx context.Context, f *framework.Framework, podCli
 	}
 }
 
-func waitForPodResizeActuation(ctx context.Context, f *framework.Framework, c clientset.Interface, podClient *e2epod.PodClient, pod, patchedPod *v1.Pod, expectedContainers []TestContainerInfo) *v1.Pod {
+func waitForPodResizeActuation(ctx context.Context, f *framework.Framework, podClient *e2epod.PodClient, pod, patchedPod *v1.Pod, expectedContainers []TestContainerInfo, initialContainers []TestContainerInfo, isRollback bool) *v1.Pod {
 	ginkgo.GinkgoHelper()
 	var resizedPod *v1.Pod
 	var pErr error
 	timeouts := framework.NewTimeoutContext()
 	// Wait for container restart
 	gomega.Eventually(ctx, waitForContainerRestart, timeouts.PodStartShort, timeouts.Poll).
-		WithArguments(f, podClient, pod, expectedContainers).
+		WithArguments(podClient, pod, expectedContainers, initialContainers, isRollback).
 		ShouldNot(gomega.HaveOccurred(), "failed waiting for expected container restart")
 	// Verify Pod Containers Cgroup Values
 	gomega.Eventually(ctx, verifyPodContainersCgroupValues, timeouts.PodStartShort, timeouts.Poll).
@@ -1285,13 +1298,12 @@ func doPodResizeTests() {
 	for idx := range tests {
 		tc := tests[idx]
 		ginkgo.It(tc.name, func(ctx context.Context) {
-			ginkgo.By("waiting for the node to be ready", func() {
-				if !supportsInPlacePodVerticalScaling(ctx, f) || framework.NodeOSDistroIs("windows") || isRunningOnArm64() {
+			ginkgo.By("check if in place pod vertical scaling is supported", func() {
+				if !isInPlacePodVerticalScalingSupportedByRuntime(ctx, f.ClientSet) || framework.NodeOSDistroIs("windows") {
 					e2eskipper.Skipf("runtime does not support InPlacePodVerticalScaling -- skipping")
 				}
 			})
-			var testPod *v1.Pod
-			var patchedPod *v1.Pod
+			var testPod, patchedPod *v1.Pod
 			var pErr error
 
 			tStamp := strconv.Itoa(time.Now().Nanosecond())
@@ -1322,9 +1334,8 @@ func doPodResizeTests() {
 			ginkgo.By("verifying initial pod resize policy is as expected")
 			verifyPodResizePolicy(newPod, tc.containers)
 
-			ginkgo.By("verifying initial pod status resources")
+			ginkgo.By("verifying initial pod status resources are as expected")
 			verifyPodStatusResources(newPod, tc.containers)
-
 			ginkgo.By("verifying initial cgroup config are as expected")
 			framework.ExpectNoError(verifyPodContainersCgroupValues(ctx, f, newPod, tc.containers))
 
@@ -1409,8 +1420,8 @@ func doPodResizeErrorTests() {
 	for idx := range tests {
 		tc := tests[idx]
 		ginkgo.It(tc.name, func(ctx context.Context) {
-			ginkgo.By("waiting for the node to be ready", func() {
-				if !supportsInPlacePodVerticalScaling(ctx, f) || framework.NodeOSDistroIs("windows") || isRunningOnArm64() {
+			ginkgo.By("check if in place pod vertical scaling is supported", func() {
+				if !isInPlacePodVerticalScalingSupportedByRuntime(ctx, f.ClientSet) || framework.NodeOSDistroIs("windows") {
 					e2eskipper.Skipf("runtime does not support InPlacePodVerticalScaling -- skipping")
 				}
 			})
@@ -1426,10 +1437,6 @@ func doPodResizeErrorTests() {
 			ginkgo.By("creating pod")
 			newPod := podClient.CreateSync(ctx, testPod)
 
-			perr := e2epod.WaitForPodCondition(ctx, f.ClientSet, newPod.Namespace, newPod.Name, "Ready", timeouts.PodStartSlow, testutils.PodRunningReady)
-			framework.ExpectNoError(perr, "pod %s/%s did not go running", newPod.Namespace, newPod.Name)
-			framework.Logf("pod %s/%s running", newPod.Namespace, newPod.Name)
-
 			ginkgo.By("verifying initial pod resources, allocations, and policy are as expected")
 			verifyPodResources(newPod, tc.containers)
 			verifyPodResizePolicy(newPod, tc.containers)
@@ -1469,12 +1476,7 @@
 // Above tests are performed by doSheduletTests() and doPodResizeResourceQuotaTests()
 // in test/e2e/node/pod_resize.go
 
-var _ = SIGDescribe("Pod InPlace Resize Container", framework.WithSerial(), feature.InPlacePodVerticalScaling, "[NodeAlphaFeature:InPlacePodVerticalScaling]", func() {
-	if !podOnCgroupv2Node {
-		cgroupMemLimit = CgroupMemLimit
-		cgroupCPULimit = CgroupCPUQuota
-		cgroupCPURequest = CgroupCPUShares
-	}
+var _ = SIGDescribe("Pod InPlace Resize Container", framework.WithSerial(), feature.InPlacePodVerticalScaling, func() {
 	doPodResizeTests()
 	doPodResizeErrorTests()
 })
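Note on the key behavioral change in this diff: cgroup-v2 detection is no longer computed at package init via IsCgroup2UnifiedMode() on the local node; the test now lazily probes the pod itself (running "mount -t cgroup2" in its first container) and caches the answer in the package-level podOnCgroupv2Node *bool before choosing v1 or v2 cgroup file names. The standalone Go sketch below illustrates only that probe-once-and-cache pattern under simplifying assumptions: it probes the local host instead of exec'ing into a pod, and the memory.max / cpu.max / memory.limit_in_bytes / cpu.cfs_quota_us names are illustrative stand-ins for the test's Cgroupv2*/Cgroup* constants. It is not part of the commit.

// Sketch only: mirrors the probe-once-and-cache idea behind podOnCgroupv2Node,
// not the e2e framework code itself.
package main

import (
	"fmt"
	"os/exec"
	"strings"
)

// nil means "not probed yet", mirroring the package-level podOnCgroupv2Node *bool.
var onCgroupV2 *bool

// isCgroupV2 runs the probe at most once and caches the result.
// The real test runs the same "mount -t cgroup2" command inside the pod's first
// container via e2epod.ExecCommandInContainerWithFullOutput; here we probe the
// local host purely for illustration.
func isCgroupV2() bool {
	if onCgroupV2 == nil {
		out, err := exec.Command("/bin/sh", "-c", "mount -t cgroup2").CombinedOutput()
		v := err == nil && len(strings.TrimSpace(string(out))) != 0
		onCgroupV2 = &v
	}
	return *onCgroupV2
}

func main() {
	// Choose cgroup file names the way verifyPodContainersCgroupValues does:
	// default to the v2 names, fall back to v1 names when the probe says otherwise.
	memLimitFile, cpuLimitFile := "memory.max", "cpu.max" // illustrative names
	if !isCgroupV2() {
		memLimitFile = "memory.limit_in_bytes"
		cpuLimitFile = "cpu.cfs_quota_us"
	}
	fmt.Println("cgroup v2:", isCgroupV2(), "memory limit file:", memLimitFile, "CPU limit file:", cpuLimitFile)
}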
