
Commit 0130ebb

DRA scheduler: refactor "allocated devices" lookup
The logic for skipping "admin access" was repeated in three different places. A single foreachAllocatedDevice helper with a callback puts it into one function.
1 parent: bd7ff9c
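To make the shape of the refactoring concrete, here is a minimal, self-contained sketch of the pattern described above. It uses simplified stand-in types rather than the real Kubernetes API (resourceapi.ResourceClaim, structured.DeviceID): the filter loop that used to be copied into every call site becomes one foreach helper taking a callback, and each call site only supplies what it does with the devices that count as allocated.

package main

import "fmt"

// Simplified stand-ins for the real types (resourceapi.ResourceClaim,
// structured.DeviceID); this only illustrates the shape of the refactoring.
type allocationResult struct {
    device      string
    adminAccess bool
}

type claim struct {
    // nil means the claim is not allocated.
    results []allocationResult
}

// forEachAllocated invokes cb for every result that counts as allocated,
// skipping shared "admin access" results -- the check that was previously
// repeated at each call site.
func forEachAllocated(c *claim, cb func(device string)) {
    if c.results == nil {
        return
    }
    for _, r := range c.results {
        if r.adminAccess {
            // Shared with other claims, not considered allocated.
            continue
        }
        cb(r.device)
    }
}

func main() {
    c := &claim{results: []allocationResult{
        {device: "gpu-0"},
        {device: "gpu-1", adminAccess: true}, // skipped
    }}

    // A former call site now only provides its callback.
    var allocated []string
    forEachAllocated(c, func(device string) {
        allocated = append(allocated, device)
    })
    fmt.Println(allocated) // prints [gpu-0]
}

As in the real helper below, the callback cannot abort the iteration: none of the callers need that, and supporting it would only add overhead.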

2 files changed: +36 / -30 lines

pkg/scheduler/framework/plugins/dynamicresources/allocateddevices.go

Lines changed: 34 additions & 17 deletions
@@ -28,6 +28,36 @@ import (
 	"k8s.io/utils/ptr"
 )
 
+// foreachAllocatedDevice invokes the provided callback for each
+// device in the claim's allocation result which was allocated
+// exclusively for the claim.
+//
+// Devices allocated with admin access can be shared with other
+// claims and are skipped without invoking the callback.
+//
+// foreachAllocatedDevice does nothing if the claim is not allocated.
+func foreachAllocatedDevice(claim *resourceapi.ResourceClaim, cb func(deviceID structured.DeviceID)) {
+	if claim.Status.Allocation == nil {
+		return
+	}
+	for _, result := range claim.Status.Allocation.Devices.Results {
+		// Kubernetes 1.31 did not set this, 1.32 always does.
+		// Supporting 1.31 is not worth the additional code that
+		// would have to be written (= looking up in request) because
+		// it is extremely unlikely that there really is a result
+		// that still exists in a cluster from 1.31 where this matters.
+		if ptr.Deref(result.AdminAccess, false) {
+			// Is not considered as allocated.
+			continue
+		}
+		deviceID := structured.MakeDeviceID(result.Driver, result.Pool, result.Device)
+
+		// None of the users of this helper need to abort iterating,
+		// therefore it's not supported as it only would add overhead.
+		cb(deviceID)
+	}
+}
+
 // allocatedDevices reacts to events in a cache and maintains a set of all allocated devices.
 // This is cheaper than repeatedly calling List, making strings unique, and building the set
 // each time PreFilter is called.
@@ -112,16 +142,10 @@ func (a *allocatedDevices) addDevices(claim *resourceapi.ResourceClaim) {
 	// Locking of the mutex gets minimized by pre-computing what needs to be done
 	// without holding the lock.
 	deviceIDs := make([]structured.DeviceID, 0, 20)
-
-	for _, result := range claim.Status.Allocation.Devices.Results {
-		if ptr.Deref(result.AdminAccess, false) {
-			// Is not considered as allocated.
-			continue
-		}
-		deviceID := structured.MakeDeviceID(result.Driver, result.Pool, result.Device)
+	foreachAllocatedDevice(claim, func(deviceID structured.DeviceID) {
 		a.logger.V(6).Info("Observed device allocation", "device", deviceID, "claim", klog.KObj(claim))
 		deviceIDs = append(deviceIDs, deviceID)
-	}
+	})
 
 	a.mutex.Lock()
 	defer a.mutex.Unlock()
@@ -138,17 +162,10 @@ func (a *allocatedDevices) removeDevices(claim *resourceapi.ResourceClaim) {
 	// Locking of the mutex gets minimized by pre-computing what needs to be done
 	// without holding the lock.
 	deviceIDs := make([]structured.DeviceID, 0, 20)
-
-	for _, result := range claim.Status.Allocation.Devices.Results {
-		if ptr.Deref(result.AdminAccess, false) {
-			// Is not considered as allocated and thus does not need to be removed
-			// because of this claim.
-			continue
-		}
-		deviceID := structured.MakeDeviceID(result.Driver, result.Pool, result.Device)
+	foreachAllocatedDevice(claim, func(deviceID structured.DeviceID) {
		a.logger.V(6).Info("Observed device deallocation", "device", deviceID, "claim", klog.KObj(claim))
 		deviceIDs = append(deviceIDs, deviceID)
-	}
+	})
 
 	a.mutex.Lock()
 	defer a.mutex.Unlock()
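As a usage illustration of the new helper (not part of the commit), the sketch below builds the situation the doc comment describes: one device allocated exclusively for the claim and one allocated with admin access. The import paths and the resource.k8s.io API version (v1beta1) are assumptions based on the identifiers visible in this diff, not something the diff shows; only the exclusively allocated device reaches the callback.

// Hypothetical example in the same package as the plugin; import aliases
// and the resource API version are assumptions, not taken from this diff.
package dynamicresources

import (
    resourceapi "k8s.io/api/resource/v1beta1"
    "k8s.io/dynamic-resource-allocation/structured"
    "k8s.io/utils/ptr"
)

func exampleForeachAllocatedDevice() []structured.DeviceID {
    claim := &resourceapi.ResourceClaim{
        Status: resourceapi.ResourceClaimStatus{
            Allocation: &resourceapi.AllocationResult{
                Devices: resourceapi.DeviceAllocationResult{
                    Results: []resourceapi.DeviceRequestAllocationResult{
                        // Allocated exclusively for this claim: the callback fires.
                        {Driver: "gpu.example.com", Pool: "pool-a", Device: "gpu-0"},
                        // Admin access: may be shared, skipped by the helper.
                        {Driver: "gpu.example.com", Pool: "pool-a", Device: "gpu-1", AdminAccess: ptr.To(true)},
                    },
                },
            },
        },
    }

    var ids []structured.DeviceID
    foreachAllocatedDevice(claim, func(deviceID structured.DeviceID) {
        ids = append(ids, deviceID)
    })
    // ids contains only the ID for gpu-0; an unallocated claim
    // (Status.Allocation == nil) would leave it empty.
    return ids
}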

pkg/scheduler/framework/plugins/dynamicresources/dynamicresources.go

Lines changed: 2 additions & 13 deletions
@@ -46,7 +46,6 @@ import (
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/names"
 	schedutil "k8s.io/kubernetes/pkg/scheduler/util"
 	"k8s.io/kubernetes/pkg/scheduler/util/assumecache"
-	"k8s.io/utils/ptr"
 )
 
 const (
@@ -571,20 +570,10 @@ func (pl *DynamicResources) listAllAllocatedDevices(logger klog.Logger) sets.Set
 	// Whatever is in flight also has to be checked.
 	pl.inFlightAllocations.Range(func(key, value any) bool {
 		claim := value.(*resourceapi.ResourceClaim)
-		for _, result := range claim.Status.Allocation.Devices.Results {
-			// Kubernetes 1.31 did not set this, 1.32 always does.
-			// Supporting 1.31 is not worth the additional code that
-			// would have to be written (= looking up in request) because
-			// it is extremely unlikely that there really is a result
-			// that still exists in a cluster from 1.31 where this matters.
-			if ptr.Deref(result.AdminAccess, false) {
-				// Is not considered as allocated.
-				continue
-			}
-			deviceID := structured.MakeDeviceID(result.Driver, result.Pool, result.Device)
+		foreachAllocatedDevice(claim, func(deviceID structured.DeviceID) {
 			logger.V(6).Info("Device is in flight for allocation", "device", deviceID, "claim", klog.KObj(claim))
 			allocated.Insert(deviceID)
-		}
+		})
 		return true
 	})
 	return allocated
