@@ -597,9 +597,9 @@ func (m *ManagerImpl) readCheckpoint() error {
 	return nil
 }
 
-// updateAllocatedDevices gets a list of active pods and then frees any Devices that are bound to
-// terminated pods. Returns error on failure.
-func (m *ManagerImpl) updateAllocatedDevices(activePods []*v1.Pod) {
+// UpdateAllocatedDevices frees any Devices that are bound to terminated pods.
+func (m *ManagerImpl) UpdateAllocatedDevices() {
+	activePods := m.activePods()
 	if !m.sourcesReady.AllReady() {
 		return
 	}
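The hunk above inverts the dependency: instead of each caller fetching and passing the active pod list, the now-exported UpdateAllocatedDevices pulls it from the activePods callback the manager already holds. A minimal, self-contained sketch of that pattern, using simplified stand-in types rather than the kubelet's real ones:

package main

import "fmt"

type Pod struct{ UID string }

type manager struct {
	activePods       func() []*Pod // injected at startup, like the kubelet's ActivePodsFunc
	podDevices       map[string][]string
	allocatedDevices map[string]bool
}

// UpdateAllocatedDevices frees devices bound to pods that are no longer active.
func (m *manager) UpdateAllocatedDevices() {
	active := map[string]bool{}
	for _, p := range m.activePods() {
		active[p.UID] = true
	}
	for uid, devs := range m.podDevices {
		if active[uid] {
			continue
		}
		for _, d := range devs {
			delete(m.allocatedDevices, d) // garbage collect the stranded device
		}
		delete(m.podDevices, uid)
	}
}

func main() {
	m := &manager{
		activePods:       func() []*Pod { return []*Pod{{UID: "a"}} },
		podDevices:       map[string][]string{"a": {"gpu0"}, "b": {"gpu1"}},
		allocatedDevices: map[string]bool{"gpu0": true, "gpu1": true},
	}
	m.UpdateAllocatedDevices()
	fmt.Println(m.allocatedDevices) // map[gpu0:true]; gpu1 was freed with pod "b"
}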
@@ -773,7 +773,7 @@ func (m *ManagerImpl) allocateContainerResources(pod *v1.Pod, container *v1.Cont
 		// Updates allocatedDevices to garbage collect any stranded resources
 		// before doing the device plugin allocation.
 		if !allocatedDevicesUpdated {
-			m.updateAllocatedDevices(m.activePods())
+			m.UpdateAllocatedDevices()
 			allocatedDevicesUpdated = true
 		}
 		allocDevices, err := m.devicesToAllocate(podUID, contName, resource, needed, devicesToReuse[resource])
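The call site keeps its guard: within one allocation pass over a container's resources, stranded-device garbage collection runs at most once, before the first real allocation. A self-contained toy sketch of that flag pattern (stub types, not the kubelet's):

package main

import "fmt"

type mgr struct{ gcRuns int }

// UpdateAllocatedDevices stands in for the manager method from the first
// hunk; here it only counts invocations.
func (m *mgr) UpdateAllocatedDevices() { m.gcRuns++ }

func main() {
	m := &mgr{}
	needed := map[string]int{"vendor.com/gpu": 2, "vendor.com/nic": 1}

	allocatedDevicesUpdated := false
	for resource, n := range needed {
		// Lazily garbage collect terminated pods' devices once per pass.
		if !allocatedDevicesUpdated {
			m.UpdateAllocatedDevices()
			allocatedDevicesUpdated = true
		}
		fmt.Printf("would allocate %d of %s\n", n, resource)
	}
	fmt.Println("GC ran", m.gcRuns, "time(s)") // GC ran 1 time(s)
}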
@@ -788,7 +788,7 @@ func (m *ManagerImpl) allocateContainerResources(pod *v1.Pod, container *v1.Cont
 	// Manager.Allocate involves RPC calls to device plugin, which
 	// could be heavy-weight. Therefore we want to perform this operation outside
 	// mutex lock. Note if Allocate call fails, we may leave container resources
-	// partially allocated for the failed container. We rely on updateAllocatedDevices()
+	// partially allocated for the failed container. We rely on UpdateAllocatedDevices()
 	// to garbage collect these resources later. Another side effect is that if
 	// we have X resource A and Y resource B in total, and two containers, container1
 	// and container2 both require X resource A and Y resource B. Both allocation
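To make the side effect described in that comment concrete: a hedged, self-contained toy (illustrative numbers X = Y = 2, not taken from the source) showing how interleaved Allocate calls for two containers that each want all of resource A and all of resource B can leave both partially allocated, with neither able to run until UpdateAllocatedDevices() reclaims the stranded devices:

package main

import "fmt"

func main() {
	free := map[string]int{"A": 2, "B": 2} // X = 2 of A, Y = 2 of B in total

	// take reserves n devices of a resource and reports success.
	take := func(res string, n int) bool {
		if free[res] < n {
			return false
		}
		free[res] -= n
		return true
	}

	// Interleaving: container1 grabs all of A, container2 grabs all of B.
	c1A := take("A", 2) // container1: A succeeds
	c2B := take("B", 2) // container2: B succeeds
	c1B := take("B", 2) // container1: B fails, container2 holds it
	c2A := take("A", 2) // container2: A fails, container1 holds it

	fmt.Println("container1:", c1A, c1B) // true false -> partially allocated
	fmt.Println("container2:", c2B, c2A) // true false -> partially allocated
}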