Commit a0bfe66
Merge pull request kubernetes#125202 from hjet/inplace-vertical-scaling-e2e-rollback
[FG:InPlacePodVerticalScaling] Roll back resize patches in doPodResizeTests
2 parents 9061523 + 30e395a

1 file changed: +90 -24
test/e2e/node/pod_resize.go

@@ -18,6 +18,7 @@ package node
 
 import (
 	"context"
+	"encoding/json"
 	"fmt"
 	"regexp"
 	"runtime"
@@ -81,6 +82,28 @@ type TestContainerInfo struct {
 	RestartCount int32
 }
 
+type containerPatch struct {
+	Name      string `json:"name"`
+	Resources struct {
+		Requests struct {
+			CPU     string `json:"cpu,omitempty"`
+			Memory  string `json:"memory,omitempty"`
+			EphStor string `json:"ephemeral-storage,omitempty"`
+		} `json:"requests"`
+		Limits struct {
+			CPU     string `json:"cpu,omitempty"`
+			Memory  string `json:"memory,omitempty"`
+			EphStor string `json:"ephemeral-storage,omitempty"`
+		} `json:"limits"`
+	} `json:"resources"`
+}
+
+type patchSpec struct {
+	Spec struct {
+		Containers []containerPatch `json:"containers"`
+	} `json:"spec"`
+}
+
 func isInPlaceResizeSupportedByRuntime(c clientset.Interface, nodeName string) bool {
 	//TODO(vinaykul,InPlacePodVerticalScaling): Can we optimize this?
 	node, err := c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
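
Note: the nested structs above marshal into the strategic-merge patch body the test later sends. Below is a minimal standalone sketch, not part of the diff: the types are trimmed copies (ephemeral-storage fields omitted, names suffixed Sketch to avoid confusion with the PR's types), and the container name and quantities are made-up values.

// Standalone sketch, stdlib only: trimmed copies of the diff's
// containerPatch/patchSpec, marshaled to show the generated patch JSON.
package main

import (
	"encoding/json"
	"fmt"
)

type containerPatchSketch struct {
	Name      string `json:"name"`
	Resources struct {
		Requests struct {
			CPU    string `json:"cpu,omitempty"`
			Memory string `json:"memory,omitempty"`
		} `json:"requests"`
		Limits struct {
			CPU    string `json:"cpu,omitempty"`
			Memory string `json:"memory,omitempty"`
		} `json:"limits"`
	} `json:"resources"`
}

type patchSpecSketch struct {
	Spec struct {
		Containers []containerPatchSketch `json:"containers"`
	} `json:"spec"`
}

func main() {
	var c containerPatchSketch
	c.Name = "c1" // made-up container name
	c.Resources.Requests.CPU = "100m"
	c.Resources.Requests.Memory = "128Mi"
	c.Resources.Limits.CPU = "200m"
	c.Resources.Limits.Memory = "256Mi"

	var p patchSpecSketch
	p.Spec.Containers = append(p.Spec.Containers, c)

	b, err := json.Marshal(p)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
	// Prints:
	// {"spec":{"containers":[{"name":"c1","resources":{"requests":{"cpu":"100m","memory":"128Mi"},"limits":{"cpu":"200m","memory":"256Mi"}}}]}}
}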
@@ -418,11 +441,18 @@ func verifyPodContainersCgroupValues(pod *v1.Pod, tcInfo []TestContainerInfo, fl
 	return true
 }
 
-func waitForPodResizeActuation(c clientset.Interface, podClient *e2epod.PodClient, pod, patchedPod *v1.Pod, expectedContainers []TestContainerInfo) *v1.Pod {
+func waitForPodResizeActuation(c clientset.Interface, podClient *e2epod.PodClient, pod, patchedPod *v1.Pod, expectedContainers []TestContainerInfo, initialContainers []TestContainerInfo, isRollback bool) *v1.Pod {
 
 	waitForContainerRestart := func() error {
 		var restartContainersExpected []string
-		for _, ci := range expectedContainers {
+
+		restartContainers := expectedContainers
+		// if we're rolling back, extract restart counts from test case "expected" containers
+		if isRollback {
+			restartContainers = initialContainers
+		}
+
+		for _, ci := range restartContainers {
 			if ci.RestartCount > 0 {
 				restartContainersExpected = append(restartContainersExpected, ci.Name)
 			}
@@ -438,7 +468,12 @@ func waitForPodResizeActuation(c clientset.Interface, podClient *e2epod.PodClien
 		restartedContainersCount := 0
 		for _, cName := range restartContainersExpected {
 			cs, _ := podutil.GetContainerStatus(pod.Status.ContainerStatuses, cName)
-			if cs.RestartCount < 1 {
+			expectedRestarts := int32(1)
+			// if we're rolling back, we should have 2 container restarts
+			if isRollback {
+				expectedRestarts = int32(2)
+			}
+			if cs.RestartCount < expectedRestarts {
 				break
 			}
 			restartedContainersCount++
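
Note: the threshold logic above boils down to a tiny rule, sketched below for explanation only (this helper is not in the PR): a container that restarts on resize reaches RestartCount 1 after the initial resize, and 2 once the rollback resize has also been actuated.

// Sketch only, not code from the PR: the minimum RestartCount the wait
// loop accepts for a container that is expected to restart on resize.
func minExpectedRestarts(isRollback bool) int32 {
	if isRollback {
		return 2 // restarted once by the resize, once more by the rollback
	}
	return 1 // restarted once by the initial resize
}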
@@ -514,6 +549,28 @@ func waitForPodResizeActuation(c clientset.Interface, podClient *e2epod.PodClien
 	return resizedPod
 }
 
+func genPatchString(containers []TestContainerInfo) (string, error) {
+	var patch patchSpec
+
+	for _, container := range containers {
+		var cPatch containerPatch
+		cPatch.Name = container.Name
+		cPatch.Resources.Requests.CPU = container.Resources.CPUReq
+		cPatch.Resources.Requests.Memory = container.Resources.MemReq
+		cPatch.Resources.Limits.CPU = container.Resources.CPULim
+		cPatch.Resources.Limits.Memory = container.Resources.MemLim
+
+		patch.Spec.Containers = append(patch.Spec.Containers, cPatch)
+	}
+
+	patchBytes, err := json.Marshal(patch)
+	if err != nil {
+		return "", err
+	}
+
+	return string(patchBytes), nil
+}
+
 func doPodResizeTests() {
 	f := framework.NewDefaultFramework("pod-resize")
 	var podClient *e2epod.PodClient
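
Note: genPatchString only populates CPU and memory; the EphStor fields are never set, so `omitempty` drops ephemeral-storage from the generated patch. Its one caller is in the hunk at line 1320 below; condensed from that diff, the rollback flow is:

// Condensed from the hunk below: generate a patch restoring the test
// case's original resources, then apply and verify it as a rollback.
rbPatchStr, err := genPatchString(tc.containers)
framework.ExpectNoError(err)
patchAndVerify(rbPatchStr, tc.containers, tc.expected, "rollback", true)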
@@ -1263,31 +1320,40 @@ func doPodResizeTests() {
 				}
 			}
 
-			ginkgo.By("patching pod for resize")
-			patchedPod, pErr = f.ClientSet.CoreV1().Pods(newPod.Namespace).Patch(context.TODO(), newPod.Name,
-				types.StrategicMergePatchType, []byte(tc.patchString), metav1.PatchOptions{})
-			framework.ExpectNoError(pErr, "failed to patch pod for resize")
-
-			ginkgo.By("verifying pod patched for resize")
-			verifyPodResources(patchedPod, tc.expected)
-			verifyPodAllocations(patchedPod, tc.containers, true)
+			patchAndVerify := func(patchString string, expectedContainers []TestContainerInfo, initialContainers []TestContainerInfo, opStr string, isRollback bool) {
+				ginkgo.By(fmt.Sprintf("patching pod for %s", opStr))
+				patchedPod, pErr = f.ClientSet.CoreV1().Pods(newPod.Namespace).Patch(context.TODO(), newPod.Name,
+					types.StrategicMergePatchType, []byte(patchString), metav1.PatchOptions{})
+				framework.ExpectNoError(pErr, fmt.Sprintf("failed to patch pod for %s", opStr))
+
+				ginkgo.By(fmt.Sprintf("verifying pod patched for %s", opStr))
+				verifyPodResources(patchedPod, expectedContainers)
+				verifyPodAllocations(patchedPod, initialContainers, true)
+
+				ginkgo.By(fmt.Sprintf("waiting for %s to be actuated", opStr))
+				resizedPod := waitForPodResizeActuation(f.ClientSet, podClient, newPod, patchedPod, expectedContainers, initialContainers, isRollback)
+
+				// Check cgroup values only for containerd versions before 1.6.9
+				if !isInPlaceResizeSupportedByRuntime(f.ClientSet, newPod.Spec.NodeName) {
+					ginkgo.By(fmt.Sprintf("verifying pod container's cgroup values after %s", opStr))
+					if !framework.NodeOSDistroIs("windows") {
+						verifyPodContainersCgroupValues(resizedPod, expectedContainers, true)
+					}
+				}
 
-			ginkgo.By("waiting for resize to be actuated")
-			resizedPod := waitForPodResizeActuation(f.ClientSet, podClient, newPod, patchedPod, tc.expected)
+				ginkgo.By(fmt.Sprintf("verifying pod resources after %s", opStr))
+				verifyPodResources(resizedPod, expectedContainers)
 
-			// Check cgroup values only for containerd versions before 1.6.9
-			if !isInPlaceResizeSupportedByRuntime(f.ClientSet, newPod.Spec.NodeName) {
-				ginkgo.By("verifying pod container's cgroup values after resize")
-				if !framework.NodeOSDistroIs("windows") {
-					verifyPodContainersCgroupValues(resizedPod, tc.expected, true)
-				}
+				ginkgo.By(fmt.Sprintf("verifying pod allocations after %s", opStr))
+				verifyPodAllocations(resizedPod, expectedContainers, true)
 			}
 
-			ginkgo.By("verifying pod resources after resize")
-			verifyPodResources(resizedPod, tc.expected)
+			patchAndVerify(tc.patchString, tc.expected, tc.containers, "resize", false)
 
-			ginkgo.By("verifying pod allocations after resize")
-			verifyPodAllocations(resizedPod, tc.expected, true)
+			rbPatchStr, err := genPatchString(tc.containers)
+			framework.ExpectNoError(err)
+			// Resize has been actuated, test rollback
+			patchAndVerify(rbPatchStr, tc.containers, tc.expected, "rollback", true)
 
 			ginkgo.By("deleting pod")
 			err = e2epod.DeletePodWithWait(ctx, f.ClientSet, newPod)
@@ -1372,7 +1438,7 @@ func doPodResizeResourceQuotaTests() {
 		verifyPodAllocations(patchedPod, containers, true)
 
 		ginkgo.By("waiting for resize to be actuated")
-		resizedPod := waitForPodResizeActuation(f.ClientSet, podClient, newPod1, patchedPod, expected)
+		resizedPod := waitForPodResizeActuation(f.ClientSet, podClient, newPod1, patchedPod, expected, containers, false)
 		if !isInPlaceResizeSupportedByRuntime(f.ClientSet, newPod1.Spec.NodeName) {
 			ginkgo.By("verifying pod container's cgroup values after resize")
 			if !framework.NodeOSDistroIs("windows") {
