@@ -18,6 +18,7 @@ package node
 
 import (
 	"context"
+	"encoding/json"
 	"fmt"
 	"regexp"
 	"runtime"
@@ -81,6 +82,28 @@ type TestContainerInfo struct {
 	RestartCount int32
 }
 
+type containerPatch struct {
+	Name      string `json:"name"`
+	Resources struct {
+		Requests struct {
+			CPU     string `json:"cpu,omitempty"`
+			Memory  string `json:"memory,omitempty"`
+			EphStor string `json:"ephemeral-storage,omitempty"`
+		} `json:"requests"`
+		Limits struct {
+			CPU     string `json:"cpu,omitempty"`
+			Memory  string `json:"memory,omitempty"`
+			EphStor string `json:"ephemeral-storage,omitempty"`
+		} `json:"limits"`
+	} `json:"resources"`
+}
+
+type patchSpec struct {
+	Spec struct {
+		Containers []containerPatch `json:"containers"`
+	} `json:"spec"`
+}
+
 func isInPlaceResizeSupportedByRuntime(c clientset.Interface, nodeName string) bool {
 	//TODO(vinaykul,InPlacePodVerticalScaling): Can we optimize this?
 	node, err := c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
@@ -418,11 +441,18 @@ func verifyPodContainersCgroupValues(pod *v1.Pod, tcInfo []TestContainerInfo, fl
 	return true
 }
 
-func waitForPodResizeActuation(c clientset.Interface, podClient *e2epod.PodClient, pod, patchedPod *v1.Pod, expectedContainers []TestContainerInfo) *v1.Pod {
+func waitForPodResizeActuation(c clientset.Interface, podClient *e2epod.PodClient, pod, patchedPod *v1.Pod, expectedContainers []TestContainerInfo, initialContainers []TestContainerInfo, isRollback bool) *v1.Pod {
 
 	waitForContainerRestart := func() error {
 		var restartContainersExpected []string
-		for _, ci := range expectedContainers {
+
+		restartContainers := expectedContainers
+		// if we're rolling back, extract restart counts from test case "expected" containers
+		if isRollback {
+			restartContainers = initialContainers
+		}
+
+		for _, ci := range restartContainers {
 			if ci.RestartCount > 0 {
 				restartContainersExpected = append(restartContainersExpected, ci.Name)
 			}
@@ -438,7 +468,12 @@ func waitForPodResizeActuation(c clientset.Interface, podClient *e2epod.PodClien
 		restartedContainersCount := 0
 		for _, cName := range restartContainersExpected {
 			cs, _ := podutil.GetContainerStatus(pod.Status.ContainerStatuses, cName)
-			if cs.RestartCount < 1 {
+			expectedRestarts := int32(1)
+			// if we're rolling back, we should have 2 container restarts
+			if isRollback {
+				expectedRestarts = int32(2)
+			}
+			if cs.RestartCount < expectedRestarts {
 				break
 			}
 			restartedContainersCount++
@@ -514,6 +549,28 @@ func waitForPodResizeActuation(c clientset.Interface, podClient *e2epod.PodClien
 	return resizedPod
 }
 
+func genPatchString(containers []TestContainerInfo) (string, error) {
+	var patch patchSpec
+
+	for _, container := range containers {
+		var cPatch containerPatch
+		cPatch.Name = container.Name
+		cPatch.Resources.Requests.CPU = container.Resources.CPUReq
+		cPatch.Resources.Requests.Memory = container.Resources.MemReq
+		cPatch.Resources.Limits.CPU = container.Resources.CPULim
+		cPatch.Resources.Limits.Memory = container.Resources.MemLim
+
+		patch.Spec.Containers = append(patch.Spec.Containers, cPatch)
+	}
+
+	patchBytes, err := json.Marshal(patch)
+	if err != nil {
+		return "", err
+	}
+
+	return string(patchBytes), nil
+}
+
 func doPodResizeTests() {
 	f := framework.NewDefaultFramework("pod-resize")
 	var podClient *e2epod.PodClient
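
For reference, a minimal sketch of the strategic-merge-patch payload that patchSpec and genPatchString produce via json.Marshal. The container name and resource values below are illustrative only, not taken from the test cases in this file, and the snippet assumes it runs inside this package with "encoding/json" imported:

	var p patchSpec
	var c containerPatch
	c.Name = "c1" // hypothetical container name
	c.Resources.Requests.CPU = "200m"
	c.Resources.Requests.Memory = "250Mi"
	c.Resources.Limits.CPU = "400m"
	c.Resources.Limits.Memory = "500Mi"
	p.Spec.Containers = append(p.Spec.Containers, c)
	b, _ := json.Marshal(p)
	// string(b) == `{"spec":{"containers":[{"name":"c1","resources":{"requests":{"cpu":"200m","memory":"250Mi"},"limits":{"cpu":"400m","memory":"500Mi"}}}]}}`

Unset ephemeral-storage fields are dropped by the omitempty tags, so only the resources being resized appear in the patch.
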
@@ -1263,31 +1320,40 @@ func doPodResizeTests() {
 				}
 			}
 
-			ginkgo.By("patching pod for resize")
-			patchedPod, pErr = f.ClientSet.CoreV1().Pods(newPod.Namespace).Patch(context.TODO(), newPod.Name,
-				types.StrategicMergePatchType, []byte(tc.patchString), metav1.PatchOptions{})
-			framework.ExpectNoError(pErr, "failed to patch pod for resize")
-
-			ginkgo.By("verifying pod patched for resize")
-			verifyPodResources(patchedPod, tc.expected)
-			verifyPodAllocations(patchedPod, tc.containers, true)
+			patchAndVerify := func(patchString string, expectedContainers []TestContainerInfo, initialContainers []TestContainerInfo, opStr string, isRollback bool) {
+				ginkgo.By(fmt.Sprintf("patching pod for %s", opStr))
+				patchedPod, pErr = f.ClientSet.CoreV1().Pods(newPod.Namespace).Patch(context.TODO(), newPod.Name,
+					types.StrategicMergePatchType, []byte(patchString), metav1.PatchOptions{})
+				framework.ExpectNoError(pErr, fmt.Sprintf("failed to patch pod for %s", opStr))
+
+				ginkgo.By(fmt.Sprintf("verifying pod patched for %s", opStr))
+				verifyPodResources(patchedPod, expectedContainers)
+				verifyPodAllocations(patchedPod, initialContainers, true)
+
+				ginkgo.By(fmt.Sprintf("waiting for %s to be actuated", opStr))
+				resizedPod := waitForPodResizeActuation(f.ClientSet, podClient, newPod, patchedPod, expectedContainers, initialContainers, isRollback)
+
+				// Check cgroup values only for containerd versions before 1.6.9
+				if !isInPlaceResizeSupportedByRuntime(f.ClientSet, newPod.Spec.NodeName) {
+					ginkgo.By(fmt.Sprintf("verifying pod container's cgroup values after %s", opStr))
+					if !framework.NodeOSDistroIs("windows") {
+						verifyPodContainersCgroupValues(resizedPod, expectedContainers, true)
+					}
+				}
 
-			ginkgo.By("waiting for resize to be actuated")
-			resizedPod := waitForPodResizeActuation(f.ClientSet, podClient, newPod, patchedPod, tc.expected)
+				ginkgo.By(fmt.Sprintf("verifying pod resources after %s", opStr))
+				verifyPodResources(resizedPod, expectedContainers)
 
-			// Check cgroup values only for containerd versions before 1.6.9
-			if !isInPlaceResizeSupportedByRuntime(f.ClientSet, newPod.Spec.NodeName) {
-				ginkgo.By("verifying pod container's cgroup values after resize")
-				if !framework.NodeOSDistroIs("windows") {
-					verifyPodContainersCgroupValues(resizedPod, tc.expected, true)
-				}
+				ginkgo.By(fmt.Sprintf("verifying pod allocations after %s", opStr))
+				verifyPodAllocations(resizedPod, expectedContainers, true)
 			}
 
-			ginkgo.By("verifying pod resources after resize")
-			verifyPodResources(resizedPod, tc.expected)
+			patchAndVerify(tc.patchString, tc.expected, tc.containers, "resize", false)
 
-			ginkgo.By("verifying pod allocations after resize")
-			verifyPodAllocations(resizedPod, tc.expected, true)
+			rbPatchStr, err := genPatchString(tc.containers)
+			framework.ExpectNoError(err)
+			// Resize has been actuated, test rollback
+			patchAndVerify(rbPatchStr, tc.containers, tc.expected, "rollback", true)
 
 			ginkgo.By("deleting pod")
 			err = e2epod.DeletePodWithWait(ctx, f.ClientSet, newPod)
@@ -1372,7 +1438,7 @@ func doPodResizeResourceQuotaTests() {
 		verifyPodAllocations(patchedPod, containers, true)
 
 		ginkgo.By("waiting for resize to be actuated")
-		resizedPod := waitForPodResizeActuation(f.ClientSet, podClient, newPod1, patchedPod, expected)
+		resizedPod := waitForPodResizeActuation(f.ClientSet, podClient, newPod1, patchedPod, expected, containers, false)
 		if !isInPlaceResizeSupportedByRuntime(f.ClientSet, newPod1.Spec.NodeName) {
 			ginkgo.By("verifying pod container's cgroup values after resize")
 			if !framework.NodeOSDistroIs("windows") {