@@ -468,7 +468,7 @@ func (m *ManagerImpl) GetCapacity() (v1.ResourceList, v1.ResourceList, []string)
 		eI, ok := m.endpoints[resourceName]
 		if (ok && eI.e.stopGracePeriodExpired()) || !ok {
 			if !ok {
-				klog.InfoS("Unexpected: unhealthyDevices and endpoints are out of sync")
+				klog.InfoS("Unexpected: unhealthyDevices and endpoints became out of sync")
 			}
 			delete(m.endpoints, resourceName)
 			delete(m.unhealthyDevices, resourceName)
@@ -484,7 +484,7 @@ func (m *ManagerImpl) GetCapacity() (v1.ResourceList, v1.ResourceList, []string)
 	m.mutex.Unlock()
 	if needsUpdateCheckpoint {
 		if err := m.writeCheckpoint(); err != nil {
-			klog.ErrorS(err, "Error on writing checkpoint")
+			klog.ErrorS(err, "Failed to write checkpoint file")
 		}
 	}
 	return capacity, allocatable, deletedResources.UnsortedList()
@@ -506,7 +506,7 @@ func (m *ManagerImpl) writeCheckpoint() error {
 		klog.ErrorS(err, "Failed to write checkpoint file")
 		return err2
 	}
-	klog.V(4).InfoS("checkpoint file written", "checkpoint", kubeletDeviceManagerCheckpoint)
+	klog.V(4).InfoS("Checkpoint file written", "checkpoint", kubeletDeviceManagerCheckpoint)
 	return nil
 }
 
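All of the renamed messages above follow the Kubernetes structured logging convention: a constant message starting with a capital letter, followed by alternating key/value pairs. Below is a minimal, self-contained sketch of that pattern with klog; the checkpoint value and error are made up for illustration and this is not the device manager code.

```go
package main

import (
	"errors"
	"flag"

	"k8s.io/klog/v2"
)

func main() {
	// Register klog's flags (-v, --logtostderr, ...) so verbosity can be set
	// on the command line, e.g. `go run main.go -v=4`.
	klog.InitFlags(nil)
	flag.Parse()
	defer klog.Flush()

	checkpoint := "kubelet_internal_checkpoint" // illustrative value only

	// InfoS: constant, capitalized message plus structured key/value pairs.
	klog.V(4).InfoS("Checkpoint file written", "checkpoint", checkpoint)

	// ErrorS: the error travels as a structured field; the message stays constant.
	err := errors.New("disk full")
	klog.ErrorS(err, "Failed to write checkpoint file", "checkpoint", checkpoint)
}
```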
@@ -536,7 +536,7 @@ func (m *ManagerImpl) readCheckpoint() error {
 		m.endpoints[resource] = endpointInfo{e: newStoppedEndpointImpl(resource), opts: nil}
 	}
 
-	klog.V(4).InfoS("read data from checkpoint file", "checkpoint", kubeletDeviceManagerCheckpoint)
+	klog.V(4).InfoS("Read data from checkpoint file", "checkpoint", kubeletDeviceManagerCheckpoint)
 	return nil
 }
 
@@ -599,7 +599,7 @@ func (m *ManagerImpl) devicesToAllocate(podUID, contName, resource string, requi
 	// running, then it can only be a kubelet restart. On node reboot the runtime and the containers were also shut down. Then, if the container was running, it can only be
 	// because it already has access to all the required devices, so we got nothing to do and we can bail out.
 	if !m.sourcesReady.AllReady() && m.isContainerAlreadyRunning(podUID, contName) {
-		klog.V(3).InfoS("container detected running, nothing to do", "deviceNumber", needed, "resourceName", resource, "podUID", podUID, "containerName", contName)
+		klog.V(3).InfoS("Container detected running, nothing to do", "deviceNumber", needed, "resourceName", resource, "podUID", podUID, "containerName", contName)
 		return nil, nil
 	}
 
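The comments in this hunk describe the recovery path on kubelet restart: while the pod sources are still syncing, a container that is already running must already hold all of its devices, so allocation can be skipped. A simplified, hedged sketch of that guard follows; allReady and containerRunning are stand-ins for m.sourcesReady.AllReady() and m.isContainerAlreadyRunning, not the real kubelet interfaces.

```go
package main

import "fmt"

// devicesNeeded applies the early-exit guard from the hunk above to a
// requested device count. allReady is false while the kubelet is still
// replaying pods from its config sources after a restart; containerRunning
// reports whether the container survived the restart.
func devicesNeeded(needed int, allReady, containerRunning bool) int {
	// Kubelet restart: the container kept running, so it already has access
	// to all required devices and nothing needs to be (re)allocated.
	if !allReady && containerRunning {
		return 0
	}
	return needed
}

func main() {
	fmt.Println(devicesNeeded(2, false, true)) // 0: restart, container survived
	fmt.Println(devicesNeeded(2, true, false)) // 2: steady state, fresh allocation
}
```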
@@ -630,7 +630,7 @@ func (m *ManagerImpl) devicesToAllocate(podUID, contName, resource string, requi
 	// We handled the known error paths in scenario 3 (node reboot), so from now on we can fall back in a common path.
 	// We cover container restart on kubelet steady state with the same flow.
 	if needed == 0 {
-		klog.V(3).InfoS("no devices needed, nothing to do", "deviceNumber", needed, "resourceName", resource, "podUID", podUID, "containerName", contName)
+		klog.V(3).InfoS("No devices needed, nothing to do", "deviceNumber", needed, "resourceName", resource, "podUID", podUID, "containerName", contName)
 		// No change, no work.
 		return nil, nil
 	}
@@ -955,7 +955,7 @@ func (m *ManagerImpl) GetDeviceRunContainerOptions(pod *v1.Pod, container *v1.Co
 		}
 
 		if !m.checkPodActive(pod) {
-			klog.V(5).InfoS("pod deleted from activePods, skip to reAllocate", "pod", klog.KObj(pod), "podUID", podUID, "containerName", container.Name)
+			klog.V(5).InfoS("Pod deleted from activePods, skip to reAllocate", "pod", klog.KObj(pod), "podUID", podUID, "containerName", container.Name)
 			continue
 		}
 
@@ -1170,19 +1170,19 @@ func (m *ManagerImpl) ShouldResetExtendedResourceCapacity() bool {
 func (m *ManagerImpl) isContainerAlreadyRunning(podUID, cntName string) bool {
 	cntID, err := m.containerMap.GetContainerID(podUID, cntName)
 	if err != nil {
-		klog.ErrorS(err, "container not found in the initial map, assumed NOT running", "podUID", podUID, "containerName", cntName)
+		klog.ErrorS(err, "Container not found in the initial map, assumed NOT running", "podUID", podUID, "containerName", cntName)
 		return false
 	}
 
 	// note that if container runtime is down when kubelet restarts, this set will be empty,
 	// so on kubelet restart containers will again fail admission, hitting https://github.com/kubernetes/kubernetes/issues/118559 again.
 	// This scenario should however be rare enough.
 	if !m.containerRunningSet.Has(cntID) {
-		klog.V(4).InfoS("container not present in the initial running set", "podUID", podUID, "containerName", cntName, "containerID", cntID)
+		klog.V(4).InfoS("Container not present in the initial running set", "podUID", podUID, "containerName", cntName, "containerID", cntID)
 		return false
 	}
 
 	// Once we make it here we know we have a running container.
-	klog.V(4).InfoS("container found in the initial set, assumed running", "podUID", podUID, "containerName", cntName, "containerID", cntID)
+	klog.V(4).InfoS("Container found in the initial set, assumed running", "podUID", podUID, "containerName", cntName, "containerID", cntID)
 	return true
 }
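isContainerAlreadyRunning boils down to two lookups: resolve the container ID from the (podUID, containerName) pair recorded at kubelet startup, then check membership in the set of containers that were running at that time. The sketch below mirrors that logic with plain maps as stand-ins for m.containerMap and m.containerRunningSet; the real code uses the kubelet's containermap package and a string set, so the types here are purely illustrative.

```go
package main

import "fmt"

// containerKey mimics the (podUID, containerName) lookup key used by the
// kubelet's container map; the real type lives in pkg/kubelet/cm/containermap.
type containerKey struct {
	podUID, containerName string
}

// isContainerAlreadyRunning is a simplified stand-in for the method in the
// hunk above: resolve the container ID, then check the "running at startup" set.
func isContainerAlreadyRunning(
	containerMap map[containerKey]string, // (podUID, name) -> containerID
	runningSet map[string]struct{}, // container IDs seen running at kubelet startup
	podUID, cntName string,
) bool {
	cntID, ok := containerMap[containerKey{podUID: podUID, containerName: cntName}]
	if !ok {
		// Not in the initial map: assume NOT running.
		return false
	}
	if _, running := runningSet[cntID]; !running {
		// Not in the initial running set: assume NOT running.
		return false
	}
	// Found in the initial set: assume running.
	return true
}

func main() {
	containerMap := map[containerKey]string{
		{podUID: "pod-1", containerName: "gpu-ctr"}: "cid-123",
	}
	runningSet := map[string]struct{}{"cid-123": {}}

	fmt.Println(isContainerAlreadyRunning(containerMap, runningSet, "pod-1", "gpu-ctr")) // true
	fmt.Println(isContainerAlreadyRunning(containerMap, runningSet, "pod-1", "sidecar")) // false
}
```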