@@ -181,8 +181,8 @@ func TestStaticAutoscalerDynamicResources(t *testing.T) {
 	req1Nic := testDeviceRequest{name: "req1Nic", count: 1, selectors: singleAttrSelector(exampleDriver, nicAttribute, nicTypeA)}
 	req1Global := testDeviceRequest{name: "req1Global", count: 1, selectors: singleAttrSelector(exampleDriver, globalDevAttribute, globalDevTypeA)}
 
-	sharedGpuBClaim := testResourceClaim("sharedGpuBClaim", nil, "", []testDeviceRequest{req1GpuB}, nil, nil)
-	sharedAllocatedGlobalClaim := testResourceClaim("sharedGlobalClaim", nil, "", []testDeviceRequest{req1Global}, []testAllocation{{request: req1Global.name, driver: exampleDriver, pool: "global-pool", device: globalDevice + "-0"}}, nil)
+	sharedGpuBClaim := testResourceClaim("sharedGpuBClaim", nil, "", []testDeviceRequest{req1GpuB}, nil)
+	sharedAllocatedGlobalClaim := testResourceClaim("sharedGlobalClaim", nil, "", []testDeviceRequest{req1Global}, []testAllocation{{request: req1Global.name, driver: exampleDriver, pool: "global-pool", device: globalDevice + "-0"}})
 
 	testCases := map[string]struct {
 		nodeGroups map[*testNodeGroupDef]int
@@ -250,10 +250,8 @@ func TestStaticAutoscalerDynamicResources(t *testing.T) {
 			expectedScaleUps: map[string]int{node1Gpu1Nic1slice.name: 3},
 		},
 		"scale-up: scale from 0 nodes in a node group": {
-			nodeGroups: map[*testNodeGroupDef]int{node1Gpu1Nic1slice: 0},
-			pods: append(
-				unscheduledPods(baseSmallPod, "unschedulable", 3, []testDeviceRequest{req1GpuA, req1Nic}),
-			),
+			nodeGroups: map[*testNodeGroupDef]int{node1Gpu1Nic1slice: 0},
+			pods: unscheduledPods(baseSmallPod, "unschedulable", 3, []testDeviceRequest{req1GpuA, req1Nic}),
 			expectedScaleUps: map[string]int{node1Gpu1Nic1slice.name: 3},
 		},
 		"scale-up: scale from 0 nodes in a node group, with pods on the template nodes consuming DRA resources": {
@@ -264,9 +262,7 @@ func TestStaticAutoscalerDynamicResources(t *testing.T) {
 					scheduledPod(baseSmallPod, "template-1", node3GpuA1slice.name+"-template", map[*testDeviceRequest][]string{&req1GpuA: {gpuDevice + "-1"}}),
 				},
 			},
-			pods: append(
-				unscheduledPods(baseSmallPod, "unschedulable", 3, []testDeviceRequest{req1GpuA}),
-			),
+			pods: unscheduledPods(baseSmallPod, "unschedulable", 3, []testDeviceRequest{req1GpuA}),
 			expectedScaleUps: map[string]int{node3GpuA1slice.name: 3},
 		},
 		"scale-up: scale from 0 nodes in a node group, with pods on the template nodes consuming DRA resources, including shared claims": {
@@ -278,16 +274,12 @@ func TestStaticAutoscalerDynamicResources(t *testing.T) {
 					scheduledPod(baseSmallPod, "template-1", node3GpuA1slice.name+"-template", map[*testDeviceRequest][]string{&req1GpuA: {gpuDevice + "-1"}}, sharedAllocatedGlobalClaim),
 				},
 			},
-			pods: append(
-				unscheduledPods(baseSmallPod, "unschedulable", 3, []testDeviceRequest{req1GpuA}, sharedAllocatedGlobalClaim),
-			),
+			pods: unscheduledPods(baseSmallPod, "unschedulable", 3, []testDeviceRequest{req1GpuA}, sharedAllocatedGlobalClaim),
 			expectedScaleUps: map[string]int{node3GpuA1slice.name: 3},
 		},
 		"no scale-up: pods requesting multiple different devices, but they're on different nodes": {
 			nodeGroups: map[*testNodeGroupDef]int{node1GpuA1slice: 1, node1Nic1slice: 1},
-			pods: append(
-				unscheduledPods(baseSmallPod, "unschedulable", 3, []testDeviceRequest{req1GpuA, req1Nic}),
-			),
+			pods: unscheduledPods(baseSmallPod, "unschedulable", 3, []testDeviceRequest{req1GpuA, req1Nic}),
 		},
 		"scale-up: pods requesting a shared, unallocated claim": {
 			extraResourceClaims: []*resourceapi.ResourceClaim{sharedGpuBClaim},
@@ -597,13 +589,13 @@ func resourceClaimsForPod(pod *apiv1.Pod, nodeName string, claimCount int, reque
 			}
 		}
 
-		claims = append(claims, testResourceClaim(name, pod, nodeName, claimRequests, claimAllocations, nil))
+		claims = append(claims, testResourceClaim(name, pod, nodeName, claimRequests, claimAllocations))
 	}
 
 	return claims
 }
 
-func testResourceClaim(claimName string, owningPod *apiv1.Pod, nodeName string, requests []testDeviceRequest, allocations []testAllocation, reservedFor []*apiv1.Pod) *resourceapi.ResourceClaim {
+func testResourceClaim(claimName string, owningPod *apiv1.Pod, nodeName string, requests []testDeviceRequest, allocations []testAllocation) *resourceapi.ResourceClaim {
 	var deviceRequests []resourceapi.DeviceRequest
 	for _, request := range requests {
 		var selectors []resourceapi.DeviceSelector
@@ -673,15 +665,6 @@ func testResourceClaim(claimName string, owningPod *apiv1.Pod, nodeName string,
 				UID: owningPod.UID,
 			},
 		}
-	} else {
-		for _, pod := range podReservations {
-			podReservations = append(podReservations, resourceapi.ResourceClaimConsumerReference{
-				APIGroup: "",
-				Resource: "pods",
-				Name:     pod.Name,
-				UID:      pod.UID,
-			})
-		}
 	}
 	claim.Status = resourceapi.ResourceClaimStatus{
 		Allocation: &resourceapi.AllocationResult{