@@ -31,6 +31,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/util/strategicpatch"
 	clientset "k8s.io/client-go/kubernetes"
 	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
 	resourceapi "k8s.io/kubernetes/pkg/api/v1/resource"
@@ -63,14 +64,17 @@ const (
 
 	PollInterval time.Duration = 2 * time.Second
 	PollTimeout  time.Duration = 4 * time.Minute
+
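+	// fakeExtendedResource is a fake node-level extended resource that the
+	// resize tests advertise on each node.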
+	fakeExtendedResource = "dummy.com/dummy"
 )
 
 type ContainerResources struct {
-	CPUReq, CPULim, MemReq, MemLim, EphStorReq, EphStorLim string
+	CPUReq, CPULim, MemReq, MemLim, EphStorReq, EphStorLim, ExtendedResourceReq, ExtendedResourceLim string
 }
 
 type ContainerAllocations struct {
-	CPUAlloc, MemAlloc, ephStorAlloc string
+	CPUAlloc, MemAlloc, ephStorAlloc, ExtendedResourceAlloc string
 }
 
 type TestContainerInfo struct {
@@ -146,6 +150,9 @@ func getTestResourceInfo(tcInfo TestContainerInfo) (v1.ResourceRequirements, v1.
 		if tcInfo.Resources.EphStorLim != "" {
 			lim[v1.ResourceEphemeralStorage] = resource.MustParse(tcInfo.Resources.EphStorLim)
 		}
+		if tcInfo.Resources.ExtendedResourceLim != "" {
+			lim[fakeExtendedResource] = resource.MustParse(tcInfo.Resources.ExtendedResourceLim)
+		}
 		if tcInfo.Resources.CPUReq != "" {
 			req[v1.ResourceCPU] = resource.MustParse(tcInfo.Resources.CPUReq)
 		}
@@ -155,6 +162,9 @@ func getTestResourceInfo(tcInfo TestContainerInfo) (v1.ResourceRequirements, v1.
 		if tcInfo.Resources.EphStorReq != "" {
 			req[v1.ResourceEphemeralStorage] = resource.MustParse(tcInfo.Resources.EphStorReq)
 		}
+		if tcInfo.Resources.ExtendedResourceReq != "" {
+			req[fakeExtendedResource] = resource.MustParse(tcInfo.Resources.ExtendedResourceReq)
+		}
 		res = v1.ResourceRequirements{Limits: lim, Requests: req}
 	}
 	if tcInfo.Allocations != nil {
@@ -168,7 +178,9 @@ func getTestResourceInfo(tcInfo TestContainerInfo) (v1.ResourceRequirements, v1.
 		if tcInfo.Allocations.ephStorAlloc != "" {
 			alloc[v1.ResourceEphemeralStorage] = resource.MustParse(tcInfo.Allocations.ephStorAlloc)
 		}
-
+		if tcInfo.Allocations.ExtendedResourceAlloc != "" {
+			alloc[fakeExtendedResource] = resource.MustParse(tcInfo.Allocations.ExtendedResourceAlloc)
+		}
 	}
 	if tcInfo.CPUPolicy != nil {
 		cpuPol := v1.ContainerResizePolicy{ResourceName: v1.ResourceCPU, RestartPolicy: *tcInfo.CPUPolicy}
@@ -318,7 +330,8 @@ func verifyPodAllocations(pod *v1.Pod, tcInfo []TestContainerInfo, flagError boo
 		cStatus := cStatusMap[ci.Name]
 		if ci.Allocations == nil {
 			if ci.Resources != nil {
-				alloc := &ContainerAllocations{CPUAlloc: ci.Resources.CPUReq, MemAlloc: ci.Resources.MemReq}
+				alloc := &ContainerAllocations{CPUAlloc: ci.Resources.CPUReq, MemAlloc: ci.Resources.MemReq,
+					ExtendedResourceAlloc: ci.Resources.ExtendedResourceReq}
 				ci.Allocations = alloc
 				defer func() {
 					ci.Allocations = nil
@@ -571,18 +584,99 @@ func genPatchString(containers []TestContainerInfo) (string, error) {
 	return string(patchBytes), nil
 }
 
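+// patchNode applies the difference between the old and new node objects to the
+// node's status subresource as a strategic merge patch.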
+func patchNode(ctx context.Context, client clientset.Interface, old *v1.Node, new *v1.Node) error {
+	oldData, err := json.Marshal(old)
+	if err != nil {
+		return err
+	}
+
+	newData, err := json.Marshal(new)
+	if err != nil {
+		return err
+	}
+	patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, &v1.Node{})
+	if err != nil {
+		return fmt.Errorf("failed to create merge patch for node %q: %w", old.Name, err)
+	}
+	_, err = client.CoreV1().Nodes().Patch(ctx, old.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}, "status")
+	return err
+}
+
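+// addExtendedResource advertises the given quantity of an extended resource in
+// the node's status, then waits until the new capacity is visible via the API.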
+func addExtendedResource(clientSet clientset.Interface, nodeName, extendedResourceName string, extendedResourceQuantity resource.Quantity) {
+	extendedResource := v1.ResourceName(extendedResourceName)
+
+	ginkgo.By("Adding a custom resource")
+	originalNode, err := clientSet.CoreV1().Nodes().Get(context.Background(), nodeName, metav1.GetOptions{})
+	framework.ExpectNoError(err)
+
+	node := originalNode.DeepCopy()
+	node.Status.Capacity[extendedResource] = extendedResourceQuantity
+	node.Status.Allocatable[extendedResource] = extendedResourceQuantity
+	err = patchNode(context.Background(), clientSet, originalNode.DeepCopy(), node)
+	framework.ExpectNoError(err)
+
+	gomega.Eventually(func() error {
+		node, err = clientSet.CoreV1().Nodes().Get(context.Background(), node.Name, metav1.GetOptions{})
+		framework.ExpectNoError(err)
+
+		fakeResourceCapacity, exists := node.Status.Capacity[extendedResource]
+		if !exists {
+			return fmt.Errorf("node %s has no %s resource capacity", node.Name, extendedResourceName)
+		}
+		if fakeResourceCapacity.Cmp(extendedResourceQuantity) != 0 {
+			return fmt.Errorf("node %s has resource capacity %s, expected: %s", node.Name, fakeResourceCapacity.String(), extendedResourceQuantity.String())
+		}
+
+		return nil
+	}).WithTimeout(30 * time.Second).WithPolling(time.Second).ShouldNot(gomega.HaveOccurred())
+}
+
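+// removeExtendedResource reverts addExtendedResource: it deletes the extended
+// resource from the node's status and waits until the capacity is gone.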
+func removeExtendedResource(clientSet clientset.Interface, nodeName, extendedResourceName string) {
+	extendedResource := v1.ResourceName(extendedResourceName)
+
+	ginkgo.By("Removing a custom resource")
+	originalNode, err := clientSet.CoreV1().Nodes().Get(context.Background(), nodeName, metav1.GetOptions{})
+	framework.ExpectNoError(err)
+
+	node := originalNode.DeepCopy()
+	delete(node.Status.Capacity, extendedResource)
+	delete(node.Status.Allocatable, extendedResource)
+	err = patchNode(context.Background(), clientSet, originalNode.DeepCopy(), node)
+	framework.ExpectNoError(err)
+
+	gomega.Eventually(func() error {
+		node, err = clientSet.CoreV1().Nodes().Get(context.Background(), nodeName, metav1.GetOptions{})
+		framework.ExpectNoError(err)
+
+		if _, exists := node.Status.Capacity[extendedResource]; exists {
+			return fmt.Errorf("node %s still has %s resource capacity, expected it to be removed", node.Name, extendedResourceName)
+		}
+
+		return nil
+	}).WithTimeout(30 * time.Second).WithPolling(time.Second).ShouldNot(gomega.HaveOccurred())
+}
+
 func doPodResizeTests() {
 	f := framework.NewDefaultFramework("pod-resize")
 	var podClient *e2epod.PodClient
+
 	ginkgo.BeforeEach(func() {
 		podClient = e2epod.NewPodClient(f)
 	})
 
 	type testCase struct {
-		name        string
-		containers  []TestContainerInfo
-		patchString string
-		expected    []TestContainerInfo
+		name                string
+		containers          []TestContainerInfo
+		patchString         string
+		expected            []TestContainerInfo
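+		// addExtendedResource, when true, advertises fakeExtendedResource on all schedulable nodes for the test.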
+		addExtendedResource bool
 	}
 
 	noRestart := v1.NotRequired
@@ -1284,6 +1378,31 @@ func doPodResizeTests() {
 				},
 			},
 		},
+		{
+			name: "Guaranteed QoS pod, one container - increase CPU & memory with an extended resource",
+			containers: []TestContainerInfo{
+				{
+					Name: "c1",
+					Resources: &ContainerResources{CPUReq: "100m", CPULim: "100m", MemReq: "200Mi", MemLim: "200Mi",
+						ExtendedResourceReq: "1", ExtendedResourceLim: "1"},
+					CPUPolicy: &noRestart,
+					MemPolicy: &noRestart,
+				},
+			},
+			patchString: `{"spec":{"containers":[
+						{"name":"c1", "resources":{"requests":{"cpu":"200m","memory":"400Mi"},"limits":{"cpu":"200m","memory":"400Mi"}}}
+					]}}`,
+			expected: []TestContainerInfo{
+				{
+					Name: "c1",
+					Resources: &ContainerResources{CPUReq: "200m", CPULim: "200m", MemReq: "400Mi", MemLim: "400Mi",
+						ExtendedResourceReq: "1", ExtendedResourceLim: "1"},
+					CPUPolicy: &noRestart,
+					MemPolicy: &noRestart,
+				},
+			},
+			addExtendedResource: true,
+		},
 	}
 
 	for idx := range tests {
@@ -1297,6 +1416,22 @@
 			initDefaultResizePolicy(tc.expected)
 			testPod = makeTestPod(f.Namespace.Name, "testpod", tStamp, tc.containers)
 
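+			// Advertise the fake extended resource on every schedulable node,
+			// and remove it again once the test completes.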
+			if tc.addExtendedResource {
+				nodes, err := e2enode.GetReadySchedulableNodes(context.Background(), f.ClientSet)
+				framework.ExpectNoError(err)
+
+				for _, node := range nodes.Items {
+					addExtendedResource(f.ClientSet, node.Name, fakeExtendedResource, resource.MustParse("123"))
+				}
+				defer func() {
+					for _, node := range nodes.Items {
+						removeExtendedResource(f.ClientSet, node.Name, fakeExtendedResource)
+					}
+				}()
+			}
+
 			ginkgo.By("creating pod")
 			newPod := podClient.CreateSync(ctx, testPod)
 