Skip to content

Commit f449697

Browse files
committed
node: device-mgr: Adhere to the message style guidelines
Ensure that the log messages adhere to the message style guidelines as captured [here](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md#message-style-guidelines). Signed-off-by: Swati Sehgal <[email protected]>
1 parent 40c86d8 commit f449697

File tree

4 files changed

+13
-13
lines changed

4 files changed

+13
-13
lines changed

pkg/kubelet/cm/devicemanager/manager.go

Lines changed: 10 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -468,7 +468,7 @@ func (m *ManagerImpl) GetCapacity() (v1.ResourceList, v1.ResourceList, []string)
468468
eI, ok := m.endpoints[resourceName]
469469
if (ok && eI.e.stopGracePeriodExpired()) || !ok {
470470
if !ok {
471-
klog.InfoS("Unexpected: unhealthyDevices and endpoints are out of sync")
471+
klog.InfoS("Unexpected: unhealthyDevices and endpoints became out of sync")
472472
}
473473
delete(m.endpoints, resourceName)
474474
delete(m.unhealthyDevices, resourceName)
@@ -484,7 +484,7 @@ func (m *ManagerImpl) GetCapacity() (v1.ResourceList, v1.ResourceList, []string)
484484
m.mutex.Unlock()
485485
if needsUpdateCheckpoint {
486486
if err := m.writeCheckpoint(); err != nil {
487-
klog.ErrorS(err, "Error on writing checkpoint")
487+
klog.ErrorS(err, "Failed to write checkpoint file")
488488
}
489489
}
490490
return capacity, allocatable, deletedResources.UnsortedList()
@@ -506,7 +506,7 @@ func (m *ManagerImpl) writeCheckpoint() error {
506506
klog.ErrorS(err, "Failed to write checkpoint file")
507507
return err2
508508
}
509-
klog.V(4).InfoS("checkpoint file written", "checkpoint", kubeletDeviceManagerCheckpoint)
509+
klog.V(4).InfoS("Checkpoint file written", "checkpoint", kubeletDeviceManagerCheckpoint)
510510
return nil
511511
}
512512

@@ -536,7 +536,7 @@ func (m *ManagerImpl) readCheckpoint() error {
536536
m.endpoints[resource] = endpointInfo{e: newStoppedEndpointImpl(resource), opts: nil}
537537
}
538538

539-
klog.V(4).InfoS("read data from checkpoint file", "checkpoint", kubeletDeviceManagerCheckpoint)
539+
klog.V(4).InfoS("Read data from checkpoint file", "checkpoint", kubeletDeviceManagerCheckpoint)
540540
return nil
541541
}
542542

@@ -599,7 +599,7 @@ func (m *ManagerImpl) devicesToAllocate(podUID, contName, resource string, requi
599599
// running, then it can only be a kubelet restart. On node reboot the runtime and the containers were also shut down. Then, if the container was running, it can only be
600600
// because it already has access to all the required devices, so we got nothing to do and we can bail out.
601601
if !m.sourcesReady.AllReady() && m.isContainerAlreadyRunning(podUID, contName) {
602-
klog.V(3).InfoS("container detected running, nothing to do", "deviceNumber", needed, "resourceName", resource, "podUID", podUID, "containerName", contName)
602+
klog.V(3).InfoS("Container detected running, nothing to do", "deviceNumber", needed, "resourceName", resource, "podUID", podUID, "containerName", contName)
603603
return nil, nil
604604
}
605605

@@ -630,7 +630,7 @@ func (m *ManagerImpl) devicesToAllocate(podUID, contName, resource string, requi
630630
// We handled the known error paths in scenario 3 (node reboot), so from now on we can fall back in a common path.
631631
// We cover container restart on kubelet steady state with the same flow.
632632
if needed == 0 {
633-
klog.V(3).InfoS("no devices needed, nothing to do", "deviceNumber", needed, "resourceName", resource, "podUID", podUID, "containerName", contName)
633+
klog.V(3).InfoS("No devices needed, nothing to do", "deviceNumber", needed, "resourceName", resource, "podUID", podUID, "containerName", contName)
634634
// No change, no work.
635635
return nil, nil
636636
}
@@ -955,7 +955,7 @@ func (m *ManagerImpl) GetDeviceRunContainerOptions(pod *v1.Pod, container *v1.Co
955955
}
956956

957957
if !m.checkPodActive(pod) {
958-
klog.V(5).InfoS("pod deleted from activePods, skip to reAllocate", "pod", klog.KObj(pod), "podUID", podUID, "containerName", container.Name)
958+
klog.V(5).InfoS("Pod deleted from activePods, skip to reAllocate", "pod", klog.KObj(pod), "podUID", podUID, "containerName", container.Name)
959959
continue
960960
}
961961

@@ -1170,19 +1170,19 @@ func (m *ManagerImpl) ShouldResetExtendedResourceCapacity() bool {
11701170
func (m *ManagerImpl) isContainerAlreadyRunning(podUID, cntName string) bool {
11711171
cntID, err := m.containerMap.GetContainerID(podUID, cntName)
11721172
if err != nil {
1173-
klog.ErrorS(err, "container not found in the initial map, assumed NOT running", "podUID", podUID, "containerName", cntName)
1173+
klog.ErrorS(err, "Container not found in the initial map, assumed NOT running", "podUID", podUID, "containerName", cntName)
11741174
return false
11751175
}
11761176

11771177
// note that if container runtime is down when kubelet restarts, this set will be empty,
11781178
// so on kubelet restart containers will again fail admission, hitting https://github.com/kubernetes/kubernetes/issues/118559 again.
11791179
// This scenario should however be rare enough.
11801180
if !m.containerRunningSet.Has(cntID) {
1181-
klog.V(4).InfoS("container not present in the initial running set", "podUID", podUID, "containerName", cntName, "containerID", cntID)
1181+
klog.V(4).InfoS("Container not present in the initial running set", "podUID", podUID, "containerName", cntName, "containerID", cntID)
11821182
return false
11831183
}
11841184

11851185
// Once we make it here we know we have a running container.
1186-
klog.V(4).InfoS("container found in the initial set, assumed running", "podUID", podUID, "containerName", cntName, "containerID", cntID)
1186+
klog.V(4).InfoS("Container found in the initial set, assumed running", "podUID", podUID, "containerName", cntName, "containerID", cntID)
11871187
return true
11881188
}

pkg/kubelet/cm/devicemanager/plugin/v1beta1/client.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -107,7 +107,7 @@ func (c *client) Disconnect() error {
107107
c.mutex.Unlock()
108108
c.handler.PluginDisconnected(c.resource)
109109

110-
klog.V(2).InfoS("device plugin disconnected", "resource", c.resource)
110+
klog.V(2).InfoS("Device plugin disconnected", "resource", c.resource)
111111
return nil
112112
}
113113

pkg/kubelet/cm/devicemanager/plugin/v1beta1/handler.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -62,7 +62,7 @@ func (s *server) ValidatePlugin(pluginName string, endpoint string, versions []s
6262
return fmt.Errorf("invalid name of device plugin socket: %s", fmt.Sprintf(errInvalidResourceName, pluginName))
6363
}
6464

65-
klog.V(2).InfoS("plugin validated", "plugin", pluginName, "endpoint", endpoint, "versions", versions)
65+
klog.V(2).InfoS("Device plugin validated", "plugin", pluginName, "endpoint", endpoint, "versions", versions)
6666
return nil
6767
}
6868

pkg/kubelet/cm/devicemanager/plugin/v1beta1/server.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -128,7 +128,7 @@ func (s *server) Start() error {
128128
func (s *server) Stop() error {
129129
s.visitClients(func(r string, c Client) {
130130
if err := s.disconnectClient(r, c); err != nil {
131-
klog.ErrorS(err, "Error disconnecting device plugin client", "resourceName", r)
131+
klog.ErrorS(err, "Failed to disconnect device plugin client", "resourceName", r)
132132
}
133133
})
134134

0 commit comments

Comments
 (0)