@@ -14,10 +14,11 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */

-package e2enode
+package node

 import (
 	"context"
+	"encoding/json"
 	"fmt"
 	"regexp"
 	"strconv"
@@ -32,14 +33,16 @@ import (
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/util/strategicpatch"
 	clientset "k8s.io/client-go/kubernetes"
+
 	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
 	kubecm "k8s.io/kubernetes/pkg/kubelet/cm"
 	"k8s.io/kubernetes/test/e2e/feature"
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
-	testutils "k8s.io/kubernetes/test/utils"
 	imageutils "k8s.io/kubernetes/test/utils/image"
 )

@@ -59,10 +62,7 @@ const (
 )

 var (
-	podOnCgroupv2Node bool   = IsCgroup2UnifiedMode()
-	cgroupMemLimit    string = Cgroupv2MemLimit
-	cgroupCPULimit    string = Cgroupv2CPULimit
-	cgroupCPURequest  string = Cgroupv2CPURequest
+	podOnCgroupv2Node *bool
 )

 type ContainerResources struct {
@@ -114,16 +114,19 @@ type patchSpec struct {
 	} `json:"spec"`
 }

-func supportsInPlacePodVerticalScaling(ctx context.Context, f *framework.Framework) bool {
-	node := getLocalNode(ctx, f)
+func isInPlacePodVerticalScalingSupportedByRuntime(ctx context.Context, c clientset.Interface) bool {
+	node, err := e2enode.GetRandomReadySchedulableNode(ctx, c)
+	framework.ExpectNoError(err)
 	re := regexp.MustCompile("containerd://(.*)")
 	match := re.FindStringSubmatch(node.Status.NodeInfo.ContainerRuntimeVersion)
 	if len(match) != 2 {
 		return false
 	}
-	// TODO(InPlacePodVerticalScaling): Update when RuntimeHandlerFeature for pod resize have been implemented
 	if ver, verr := semver.ParseTolerant(match[1]); verr == nil {
-		return ver.Compare(semver.MustParse(MinContainerRuntimeVersion)) >= 0
+		if ver.Compare(semver.MustParse(MinContainerRuntimeVersion)) < 0 {
+			return false
+		}
+		return true
 	}
 	return false
 }
@@ -222,15 +225,11 @@ func makeTestContainer(tcInfo TestContainerInfo) (v1.Container, v1.ContainerStat

 func makeTestPod(ns, name, timeStamp string, tcInfo []TestContainerInfo) *v1.Pod {
 	var testContainers []v1.Container
-	var podOS *v1.PodOS

 	for _, ci := range tcInfo {
 		tc, _ := makeTestContainer(ci)
 		testContainers = append(testContainers, tc)
 	}
-
-	podOS = &v1.PodOS{Name: v1.Linux}
-
 	pod := &v1.Pod{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: name,
@@ -240,97 +239,103 @@ func makeTestPod(ns, name, timeStamp string, tcInfo []TestContainerInfo) *v1.Pod
 			},
 		},
 		Spec: v1.PodSpec{
-			OS:            podOS,
+			OS:            &v1.PodOS{Name: v1.Linux},
 			Containers:    testContainers,
 			RestartPolicy: v1.RestartPolicyOnFailure,
 		},
 	}
 	return pod
 }

-func verifyPodResizePolicy(pod *v1.Pod, tcInfo []TestContainerInfo) {
+func verifyPodResizePolicy(gotPod *v1.Pod, wantCtrs []TestContainerInfo) {
 	ginkgo.GinkgoHelper()
-	cMap := make(map[string]*v1.Container)
-	for i, c := range pod.Spec.Containers {
-		cMap[c.Name] = &pod.Spec.Containers[i]
-	}
-	for _, ci := range tcInfo {
-		gomega.Expect(cMap).Should(gomega.HaveKey(ci.Name))
-		c := cMap[ci.Name]
-		tc, _ := makeTestContainer(ci)
-		gomega.Expect(tc.ResizePolicy).To(gomega.Equal(c.ResizePolicy))
+	for i, wantCtr := range wantCtrs {
+		gotCtr := &gotPod.Spec.Containers[i]
+		ctr, _ := makeTestContainer(wantCtr)
+		gomega.Expect(gotCtr.Name).To(gomega.Equal(ctr.Name))
+		gomega.Expect(gotCtr.ResizePolicy).To(gomega.Equal(ctr.ResizePolicy))
 	}
 }

-func verifyPodResources(pod *v1.Pod, tcInfo []TestContainerInfo) {
+func verifyPodResources(gotPod *v1.Pod, wantCtrs []TestContainerInfo) {
 	ginkgo.GinkgoHelper()
-	cMap := make(map[string]*v1.Container)
-	for i, c := range pod.Spec.Containers {
-		cMap[c.Name] = &pod.Spec.Containers[i]
-	}
-	for _, ci := range tcInfo {
-		gomega.Expect(cMap).Should(gomega.HaveKey(ci.Name))
-		c := cMap[ci.Name]
-		tc, _ := makeTestContainer(ci)
-		gomega.Expect(tc.Resources).To(gomega.Equal(c.Resources))
+	for i, wantCtr := range wantCtrs {
+		gotCtr := &gotPod.Spec.Containers[i]
+		ctr, _ := makeTestContainer(wantCtr)
+		gomega.Expect(gotCtr.Name).To(gomega.Equal(ctr.Name))
+		gomega.Expect(gotCtr.Resources).To(gomega.Equal(ctr.Resources))
 	}
 }

-func verifyPodAllocations(pod *v1.Pod, tcInfo []TestContainerInfo) error {
+func verifyPodAllocations(gotPod *v1.Pod, wantCtrs []TestContainerInfo) error {
 	ginkgo.GinkgoHelper()
-	cStatusMap := make(map[string]*v1.ContainerStatus)
-	for i, c := range pod.Status.ContainerStatuses {
-		cStatusMap[c.Name] = &pod.Status.ContainerStatuses[i]
-	}
-
-	for _, ci := range tcInfo {
-		gomega.Expect(cStatusMap).Should(gomega.HaveKey(ci.Name))
-		cStatus := cStatusMap[ci.Name]
-		if ci.Allocations == nil {
-			if ci.Resources != nil {
-				alloc := &ContainerAllocations{CPUAlloc: ci.Resources.CPUReq, MemAlloc: ci.Resources.MemReq}
-				ci.Allocations = alloc
+	for i, wantCtr := range wantCtrs {
+		gotCtrStatus := &gotPod.Status.ContainerStatuses[i]
+		if wantCtr.Allocations == nil {
+			if wantCtr.Resources != nil {
+				alloc := &ContainerAllocations{CPUAlloc: wantCtr.Resources.CPUReq, MemAlloc: wantCtr.Resources.MemReq}
+				wantCtr.Allocations = alloc
 				defer func() {
-					ci.Allocations = nil
+					wantCtr.Allocations = nil
 				}()
 			}
 		}

-		_, tcStatus := makeTestContainer(ci)
-		if !cmp.Equal(cStatus.AllocatedResources, tcStatus.AllocatedResources) {
+		_, ctrStatus := makeTestContainer(wantCtr)
+		gomega.Expect(gotCtrStatus.Name).To(gomega.Equal(ctrStatus.Name))
+		if !cmp.Equal(gotCtrStatus.AllocatedResources, ctrStatus.AllocatedResources) {
 			return fmt.Errorf("failed to verify Pod allocations, allocated resources not equal to expected")
 		}
 	}
 	return nil
 }

-func verifyPodStatusResources(pod *v1.Pod, tcInfo []TestContainerInfo) {
+func verifyPodStatusResources(gotPod *v1.Pod, wantCtrs []TestContainerInfo) {
 	ginkgo.GinkgoHelper()
-	csMap := make(map[string]*v1.ContainerStatus)
-	for i, c := range pod.Status.ContainerStatuses {
-		csMap[c.Name] = &pod.Status.ContainerStatuses[i]
+	for i, wantCtr := range wantCtrs {
+		gotCtrStatus := &gotPod.Status.ContainerStatuses[i]
+		ctr, _ := makeTestContainer(wantCtr)
+		gomega.Expect(gotCtrStatus.Name).To(gomega.Equal(ctr.Name))
+		gomega.Expect(ctr.Resources).To(gomega.Equal(*gotCtrStatus.Resources))
 	}
-	for _, ci := range tcInfo {
-		gomega.Expect(csMap).Should(gomega.HaveKey(ci.Name))
-		cs := csMap[ci.Name]
-		tc, _ := makeTestContainer(ci)
-		gomega.Expect(tc.Resources).To(gomega.Equal(*cs.Resources))
+}
+
+func isPodOnCgroupv2Node(f *framework.Framework, pod *v1.Pod) bool {
+	// Determine if pod is running on cgroupv2 or cgroupv1 node
+	//TODO(vinaykul,InPlacePodVerticalScaling): Is there a better way to determine this?
+	cmd := "mount -t cgroup2"
+	out, _, err := e2epod.ExecCommandInContainerWithFullOutput(f, pod.Name, pod.Spec.Containers[0].Name, "/bin/sh", "-c", cmd)
+	if err != nil {
+		return false
 	}
+	return len(out) != 0
 }

 func verifyPodContainersCgroupValues(ctx context.Context, f *framework.Framework, pod *v1.Pod, tcInfo []TestContainerInfo) error {
 	ginkgo.GinkgoHelper()
+	if podOnCgroupv2Node == nil {
+		value := isPodOnCgroupv2Node(f, pod)
+		podOnCgroupv2Node = &value
+	}
+	cgroupMemLimit := Cgroupv2MemLimit
+	cgroupCPULimit := Cgroupv2CPULimit
+	cgroupCPURequest := Cgroupv2CPURequest
+	if !*podOnCgroupv2Node {
+		cgroupMemLimit = CgroupMemLimit
+		cgroupCPULimit = CgroupCPUQuota
+		cgroupCPURequest = CgroupCPUShares
+	}
 	verifyCgroupValue := func(cName, cgPath, expectedCgValue string) error {
-		mycmd := fmt.Sprintf("head -n 1 %s", cgPath)
-		cgValue, _, err := e2epod.ExecCommandInContainerWithFullOutput(f, pod.Name, cName, "/bin/sh", "-c", mycmd)
+		cmd := fmt.Sprintf("head -n 1 %s", cgPath)
 		framework.Logf("Namespace %s Pod %s Container %s - looking for cgroup value %s in path %s",
 			pod.Namespace, pod.Name, cName, expectedCgValue, cgPath)
+		cgValue, _, err := e2epod.ExecCommandInContainerWithFullOutput(f, pod.Name, cName, "/bin/sh", "-c", cmd)
 		if err != nil {
-			return fmt.Errorf("failed to find expected value '%s' in container cgroup '%s'", expectedCgValue, cgPath)
+			return fmt.Errorf("failed to find expected value %q in container cgroup %q", expectedCgValue, cgPath)
 		}
 		cgValue = strings.Trim(cgValue, "\n")
 		if cgValue != expectedCgValue {
-			return fmt.Errorf("cgroup value '%s' not equal to expected '%s'", cgValue, expectedCgValue)
+			return fmt.Errorf("cgroup value %q not equal to expected %q", cgValue, expectedCgValue)
 		}
 		return nil
 	}
@@ -356,7 +361,7 @@ func verifyPodContainersCgroupValues(ctx context.Context, f *framework.Framework
 		}
 		expectedCPULimitString = strconv.FormatInt(cpuQuota, 10)
 		expectedMemLimitString = strconv.FormatInt(expectedMemLimitInBytes, 10)
-		if podOnCgroupv2Node {
+		if *podOnCgroupv2Node {
 			if expectedCPULimitString == "-1" {
 				expectedCPULimitString = "max"
 			}
@@ -387,17 +392,25 @@ func verifyPodContainersCgroupValues(ctx context.Context, f *framework.Framework
 	return nil
 }

-func waitForContainerRestart(ctx context.Context, f *framework.Framework, podClient *e2epod.PodClient, pod *v1.Pod, expectedContainers []TestContainerInfo) error {
+func waitForContainerRestart(ctx context.Context, podClient *e2epod.PodClient, pod *v1.Pod, expectedContainers []TestContainerInfo, initialContainers []TestContainerInfo, isRollback bool) error {
 	ginkgo.GinkgoHelper()
 	var restartContainersExpected []string
-	for _, ci := range expectedContainers {
+
+	restartContainers := expectedContainers
+	// if we're rolling back, extract restart counts from test case "expected" containers
+	if isRollback {
+		restartContainers = initialContainers
+	}
+
+	for _, ci := range restartContainers {
 		if ci.RestartCount > 0 {
 			restartContainersExpected = append(restartContainersExpected, ci.Name)
 		}
 	}
 	if len(restartContainersExpected) == 0 {
 		return nil
 	}
+
 	pod, err := podClient.Get(ctx, pod.Name, metav1.GetOptions{})
 	if err != nil {
 		return err
@@ -420,14 +433,14 @@ func waitForContainerRestart(ctx context.Context, f *framework.Framework, podCli
 	}
 }

-func waitForPodResizeActuation(ctx context.Context, f *framework.Framework, c clientset.Interface, podClient *e2epod.PodClient, pod, patchedPod *v1.Pod, expectedContainers []TestContainerInfo) *v1.Pod {
+func waitForPodResizeActuation(ctx context.Context, f *framework.Framework, podClient *e2epod.PodClient, pod, patchedPod *v1.Pod, expectedContainers []TestContainerInfo, initialContainers []TestContainerInfo, isRollback bool) *v1.Pod {
 	ginkgo.GinkgoHelper()
 	var resizedPod *v1.Pod
 	var pErr error
 	timeouts := framework.NewTimeoutContext()
 	// Wait for container restart
 	gomega.Eventually(ctx, waitForContainerRestart, timeouts.PodStartShort, timeouts.Poll).
-		WithArguments(f, podClient, pod, expectedContainers).
+		WithArguments(podClient, pod, expectedContainers, initialContainers, isRollback).
 		ShouldNot(gomega.HaveOccurred(), "failed waiting for expected container restart")
 	// Verify Pod Containers Cgroup Values
 	gomega.Eventually(ctx, verifyPodContainersCgroupValues, timeouts.PodStartShort, timeouts.Poll).
@@ -1285,13 +1298,12 @@ func doPodResizeTests() {
 	for idx := range tests {
 		tc := tests[idx]
 		ginkgo.It(tc.name, func(ctx context.Context) {
-			ginkgo.By("waiting for the node to be ready", func() {
-				if !supportsInPlacePodVerticalScaling(ctx, f) || framework.NodeOSDistroIs("windows") || isRunningOnArm64() {
+			ginkgo.By("check if in place pod vertical scaling is supported", func() {
+				if !isInPlacePodVerticalScalingSupportedByRuntime(ctx, f.ClientSet) || framework.NodeOSDistroIs("windows") {
 					e2eskipper.Skipf("runtime does not support InPlacePodVerticalScaling -- skipping")
 				}
 			})
-			var testPod *v1.Pod
-			var patchedPod *v1.Pod
+			var testPod, patchedPod *v1.Pod
 			var pErr error

 			tStamp := strconv.Itoa(time.Now().Nanosecond())
@@ -1322,9 +1334,8 @@ func doPodResizeTests() {
 			ginkgo.By("verifying initial pod resize policy is as expected")
 			verifyPodResizePolicy(newPod, tc.containers)

-			ginkgo.By("verifying initial pod status resources")
+			ginkgo.By("verifying initial pod status resources are as expected")
 			verifyPodStatusResources(newPod, tc.containers)
-
 			ginkgo.By("verifying initial cgroup config are as expected")
 			framework.ExpectNoError(verifyPodContainersCgroupValues(ctx, f, newPod, tc.containers))

@@ -1409,8 +1420,8 @@ func doPodResizeErrorTests() {
 	for idx := range tests {
 		tc := tests[idx]
 		ginkgo.It(tc.name, func(ctx context.Context) {
-			ginkgo.By("waiting for the node to be ready", func() {
-				if !supportsInPlacePodVerticalScaling(ctx, f) || framework.NodeOSDistroIs("windows") || isRunningOnArm64() {
+			ginkgo.By("check if in place pod vertical scaling is supported", func() {
+				if !isInPlacePodVerticalScalingSupportedByRuntime(ctx, f.ClientSet) || framework.NodeOSDistroIs("windows") {
 					e2eskipper.Skipf("runtime does not support InPlacePodVerticalScaling -- skipping")
 				}
 			})
@@ -1426,10 +1437,6 @@ func doPodResizeErrorTests() {
 			ginkgo.By("creating pod")
 			newPod := podClient.CreateSync(ctx, testPod)

-			perr := e2epod.WaitForPodCondition(ctx, f.ClientSet, newPod.Namespace, newPod.Name, "Ready", timeouts.PodStartSlow, testutils.PodRunningReady)
-			framework.ExpectNoError(perr, "pod %s/%s did not go running", newPod.Namespace, newPod.Name)
-			framework.Logf("pod %s/%s running", newPod.Namespace, newPod.Name)
-
 			ginkgo.By("verifying initial pod resources, allocations, and policy are as expected")
 			verifyPodResources(newPod, tc.containers)
 			verifyPodResizePolicy(newPod, tc.containers)
@@ -1469,12 +1476,7 @@ func doPodResizeErrorTests() {
 // Above tests are performed by doSheduletTests() and doPodResizeResourceQuotaTests()
 // in test/e2e/node/pod_resize.go

-var _ = SIGDescribe("Pod InPlace Resize Container", framework.WithSerial(), feature.InPlacePodVerticalScaling, "[NodeAlphaFeature:InPlacePodVerticalScaling]", func() {
-	if !podOnCgroupv2Node {
-		cgroupMemLimit = CgroupMemLimit
-		cgroupCPULimit = CgroupCPUQuota
-		cgroupCPURequest = CgroupCPUShares
-	}
+var _ = SIGDescribe("Pod InPlace Resize Container", framework.WithSerial(), feature.InPlacePodVerticalScaling, func() {
 	doPodResizeTests()
 	doPodResizeErrorTests()
 })
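
Note for reviewers: the cgroup-mode check above moves from a package-level bool computed at init to a lazily populated, cached *bool that is detected from inside a running pod. A minimal standalone sketch of that detect-once-and-cache pattern, using a local shell instead of the e2e pod-exec helper (names and the main wrapper are illustrative, not part of this PR):

package main

import (
	"fmt"
	"os/exec"
	"strings"
)

// onCgroupv2 is nil until the first check, mirroring the *bool cache used above.
var onCgroupv2 *bool

// isOnCgroupv2 detects cgroup v2 once by looking for a cgroup2 mount and caches the result.
func isOnCgroupv2() bool {
	if onCgroupv2 == nil {
		// Illustrative only: the e2e test runs "mount -t cgroup2" inside the pod via
		// e2epod.ExecCommandInContainerWithFullOutput; here it runs on the local host.
		out, err := exec.Command("/bin/sh", "-c", "mount -t cgroup2").CombinedOutput()
		v := err == nil && len(strings.TrimSpace(string(out))) != 0
		onCgroupv2 = &v
	}
	return *onCgroupv2
}

func main() {
	fmt.Println("running on cgroup v2:", isOnCgroupv2())
}

After the first call the check is free, which matters because verifyPodContainersCgroupValues is polled repeatedly via gomega.Eventually.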