Commit 529b019

Merge pull request kubernetes#126843 from oxxenix/migrate-pleg-to-contextual-logging
Migrate PLEG to contextual logging
2 parents 3ab0c84 + 2474369 commit 529b019

File tree

8 files changed: +50 -39 lines
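
The change follows the contextual-logging migration pattern used elsewhere in the tree: each PLEG instance now receives a klog.Logger and logs through it instead of calling the global klog functions. A minimal sketch of the before/after call shape, assuming klog/v2 (the function names and the wiring in main are illustrative, not taken from this commit):

package main

import (
	"k8s.io/klog/v2"
)

// relistBefore logs through the global klog functions, as the old code did.
func relistBefore() {
	klog.V(5).InfoS("GenericPLEG: Relisting")
}

// relistAfter logs through an injected logger, so callers can attach a name
// or extra key/value pairs that appear on every message from this component.
func relistAfter(logger klog.Logger) {
	logger.V(5).Info("GenericPLEG: Relisting")
}

func main() {
	logger := klog.Background().WithName("pleg")
	relistBefore()
	relistAfter(logger)
}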

hack/golangci-hints.yaml

Lines changed: 1 addition & 0 deletions
@@ -152,6 +152,7 @@ linters-settings: # please keep this alphabetized
 contextual k8s.io/kubernetes/pkg/scheduler/.*
 contextual k8s.io/kubernetes/test/e2e/dra/.*
 contextual k8s.io/kubernetes/pkg/kubelet/cm/dra/.*
+contextual k8s.io/kubernetes/pkg/kubelet/pleg/.*

 # As long as contextual logging is alpha or beta, all WithName, WithValues,
 # NewContext calls have to go through klog. Once it is GA, we can lift

hack/golangci-strict.yaml

Lines changed: 1 addition & 0 deletions
@@ -198,6 +198,7 @@ linters-settings: # please keep this alphabetized
 contextual k8s.io/kubernetes/pkg/scheduler/.*
 contextual k8s.io/kubernetes/test/e2e/dra/.*
 contextual k8s.io/kubernetes/pkg/kubelet/cm/dra/.*
+contextual k8s.io/kubernetes/pkg/kubelet/pleg/.*

 # As long as contextual logging is alpha or beta, all WithName, WithValues,
 # NewContext calls have to go through klog. Once it is GA, we can lift

hack/golangci.yaml

Lines changed: 1 addition & 0 deletions
@@ -201,6 +201,7 @@ linters-settings: # please keep this alphabetized
 contextual k8s.io/kubernetes/pkg/scheduler/.*
 contextual k8s.io/kubernetes/test/e2e/dra/.*
 contextual k8s.io/kubernetes/pkg/kubelet/cm/dra/.*
+contextual k8s.io/kubernetes/pkg/kubelet/pleg/.*

 # As long as contextual logging is alpha or beta, all WithName, WithValues,
 # NewContext calls have to go through klog. Once it is GA, we can lift

hack/logcheck.conf

Lines changed: 1 addition & 0 deletions
@@ -48,6 +48,7 @@ contextual k8s.io/kubernetes/pkg/controller/.*
 contextual k8s.io/kubernetes/pkg/scheduler/.*
 contextual k8s.io/kubernetes/test/e2e/dra/.*
 contextual k8s.io/kubernetes/pkg/kubelet/cm/dra/.*
+contextual k8s.io/kubernetes/pkg/kubelet/pleg/.*

 # As long as contextual logging is alpha or beta, all WithName, WithValues,
 # NewContext calls have to go through klog. Once it is GA, we can lift
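
These four hack/ entries opt k8s.io/kubernetes/pkg/kubelet/pleg into the logcheck "contextual" check, which flags direct calls to the global klog logging functions in the listed packages. Roughly, the linter now rejects the first function below and accepts the second; this is a sketch of the rule's effect under that assumption, not actual linter output:

package pleg

import "k8s.io/klog/v2"

type component struct {
	logger klog.Logger
}

// Flagged by the contextual check: logs through the global klog call.
func (c *component) flagged() {
	klog.InfoS("relisting")
}

// Accepted: logs through the logger the component was constructed with.
func (c *component) accepted() {
	c.logger.Info("relisting")
}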

pkg/kubelet/kubelet.go

Lines changed: 3 additions & 3 deletions
@@ -738,15 +738,15 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
 			RelistPeriod: eventedPlegRelistPeriod,
 			RelistThreshold: eventedPlegRelistThreshold,
 		}
-		klet.pleg = pleg.NewGenericPLEG(klet.containerRuntime, eventChannel, genericRelistDuration, klet.podCache, clock.RealClock{})
+		klet.pleg = pleg.NewGenericPLEG(logger, klet.containerRuntime, eventChannel, genericRelistDuration, klet.podCache, clock.RealClock{})
 		// In case Evented PLEG has to fall back on Generic PLEG due to an error,
 		// Evented PLEG should be able to reset the Generic PLEG relisting duration
 		// to the default value.
 		eventedRelistDuration := &pleg.RelistDuration{
 			RelistPeriod: genericPlegRelistPeriod,
 			RelistThreshold: genericPlegRelistThreshold,
 		}
-		klet.eventedPleg, err = pleg.NewEventedPLEG(klet.containerRuntime, klet.runtimeService, eventChannel,
+		klet.eventedPleg, err = pleg.NewEventedPLEG(logger, klet.containerRuntime, klet.runtimeService, eventChannel,
 			klet.podCache, klet.pleg, eventedPlegMaxStreamRetries, eventedRelistDuration, clock.RealClock{})
 		if err != nil {
 			return nil, err
@@ -756,7 +756,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
 			RelistPeriod: genericPlegRelistPeriod,
 			RelistThreshold: genericPlegRelistThreshold,
 		}
-		klet.pleg = pleg.NewGenericPLEG(klet.containerRuntime, eventChannel, genericRelistDuration, klet.podCache, clock.RealClock{})
+		klet.pleg = pleg.NewGenericPLEG(logger, klet.containerRuntime, eventChannel, genericRelistDuration, klet.podCache, clock.RealClock{})
 	}

 	klet.runtimeState = newRuntimeState(maxWaitForContainerRuntime)
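
NewMainKubelet already has a klog.Logger in scope (the logger value passed to the constructors above), so the PLEG constructors simply take it as their first parameter. A caller without contextual-logging plumbing can derive a logger from a context or fall back to the process-wide one; a minimal sketch of that wiring with an illustrative stand-in type rather than the real GenericPLEG:

package main

import (
	"context"

	"k8s.io/klog/v2"
)

// worker stands in for a component like GenericPLEG that keeps the logger it
// was constructed with and uses it for every log line.
type worker struct {
	logger klog.Logger
}

func newWorker(logger klog.Logger) *worker {
	return &worker{logger: logger}
}

func (w *worker) relist() {
	w.logger.V(5).Info("Relisting")
}

func main() {
	// Prefer a logger carried in the context when one is plumbed through;
	// otherwise klog.Background() returns the process-wide logger.
	ctx := klog.NewContext(context.Background(), klog.Background().WithName("kubelet"))
	newWorker(klog.FromContext(ctx)).relist()
}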

pkg/kubelet/kubelet_test.go

Lines changed: 1 addition & 1 deletion
@@ -334,7 +334,7 @@ func newTestKubeletWithImageList(
 	kubelet.resyncInterval = 10 * time.Second
 	kubelet.workQueue = queue.NewBasicWorkQueue(fakeClock)
 	// Relist period does not affect the tests.
-	kubelet.pleg = pleg.NewGenericPLEG(fakeRuntime, make(chan *pleg.PodLifecycleEvent, 100), &pleg.RelistDuration{RelistPeriod: time.Hour, RelistThreshold: genericPlegRelistThreshold}, kubelet.podCache, clock.RealClock{})
+	kubelet.pleg = pleg.NewGenericPLEG(logger, fakeRuntime, make(chan *pleg.PodLifecycleEvent, 100), &pleg.RelistDuration{RelistPeriod: time.Hour, RelistThreshold: genericPlegRelistThreshold}, kubelet.podCache, clock.RealClock{})
 	kubelet.clock = fakeClock

 	nodeRef := &v1.ObjectReference{
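
Test call sites need a logger too; in the Kubernetes tree that usually comes from klog's ktesting helper, which routes output through the testing.T of the running test. A minimal sketch (the test name and message are illustrative):

package pleg_test

import (
	"testing"

	"k8s.io/klog/v2/ktesting"
)

func TestLoggerWiring(t *testing.T) {
	// NewTestContext returns a logger that writes via t.Log, plus a context
	// carrying that logger for code that calls klog.FromContext.
	logger, _ := ktesting.NewTestContext(t)
	logger.V(4).Info("relist period does not affect the tests")
}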

pkg/kubelet/pleg/evented.go

Lines changed: 19 additions & 16 deletions
@@ -83,10 +83,12 @@ type EventedPLEG struct {
 	stopCacheUpdateCh chan struct{}
 	// Locks the start/stop operation of the Evented PLEG.
 	runningMu sync.Mutex
+	// logger is used for contextual logging
+	logger klog.Logger
 }

 // NewEventedPLEG instantiates a new EventedPLEG object and return it.
-func NewEventedPLEG(runtime kubecontainer.Runtime, runtimeService internalapi.RuntimeService, eventChannel chan *PodLifecycleEvent,
+func NewEventedPLEG(logger klog.Logger, runtime kubecontainer.Runtime, runtimeService internalapi.RuntimeService, eventChannel chan *PodLifecycleEvent,
 	cache kubecontainer.Cache, genericPleg PodLifecycleEventGenerator, eventedPlegMaxStreamRetries int,
 	relistDuration *RelistDuration, clock clock.Clock) (PodLifecycleEventGenerator, error) {
 	handler, ok := genericPleg.(podLifecycleEventGeneratorHandler)
@@ -102,6 +104,7 @@ func NewEventedPLEG(runtime kubecontainer.Runtime, runtimeService internalapi.Ru
 		eventedPlegMaxStreamRetries: eventedPlegMaxStreamRetries,
 		relistDuration: relistDuration,
 		clock: clock,
+		logger: logger,
 	}, nil
 }

@@ -184,7 +187,7 @@ func (e *EventedPLEG) watchEventsChannel() {
 			if numAttempts >= e.eventedPlegMaxStreamRetries {
 				if isEventedPLEGInUse() {
 					// Fall back to Generic PLEG relisting since Evented PLEG is not working.
-					klog.V(4).InfoS("Fall back to Generic PLEG relisting since Evented PLEG is not working")
+					e.logger.V(4).Info("Fall back to Generic PLEG relisting since Evented PLEG is not working")
 					e.Stop()
 					e.genericPleg.Stop() // Stop the existing Generic PLEG which runs with longer relisting period when Evented PLEG is in use.
 					e.Update(e.relistDuration) // Update the relisting period to the default value for the Generic PLEG.
@@ -200,7 +203,7 @@ func (e *EventedPLEG) watchEventsChannel() {
 				metrics.EventedPLEGConnErr.Inc()
 				numAttempts++
 				e.Relist() // Force a relist to get the latest container and pods running metric.
-				klog.V(4).InfoS("Evented PLEG: Failed to get container events, retrying: ", "err", err)
+				e.logger.V(4).Info("Evented PLEG: Failed to get container events, retrying: ", "err", err)
 			}
 		}
 	}()
@@ -221,7 +224,7 @@ func (e *EventedPLEG) processCRIEvents(containerEventsResponseCh chan *runtimeap
 		// b) in worst case, a relist will eventually sync the pod status.
 		// TODO(#114371): Figure out a way to handle this case instead of ignoring.
 		if event.PodSandboxStatus == nil || event.PodSandboxStatus.Metadata == nil {
-			klog.ErrorS(nil, "Evented PLEG: received ContainerEventResponse with nil PodSandboxStatus or PodSandboxStatus.Metadata", "containerEventResponse", event)
+			e.logger.Error(nil, "Evented PLEG: received ContainerEventResponse with nil PodSandboxStatus or PodSandboxStatus.Metadata", "containerEventResponse", event)
 			continue
 		}

@@ -234,15 +237,15 @@ func (e *EventedPLEG) processCRIEvents(containerEventsResponseCh chan *runtimeap
 			// if branch is okay, we just use it to determine whether the
 			// additional "podStatus" key and its value should be added.
 			if klog.V(6).Enabled() {
-				klog.ErrorS(err, "Evented PLEG: error generating pod status from the received event", "podUID", podID, "podStatus", status)
+				e.logger.Error(err, "Evented PLEG: error generating pod status from the received event", "podUID", podID, "podStatus", status)
 			} else {
-				klog.ErrorS(err, "Evented PLEG: error generating pod status from the received event", "podUID", podID)
+				e.logger.Error(err, "Evented PLEG: error generating pod status from the received event", "podUID", podID)
 			}
 		} else {
-			if klogV := klog.V(6); klogV.Enabled() {
-				klogV.InfoS("Evented PLEG: Generated pod status from the received event", "podUID", podID, "podStatus", status)
+			if klogV := e.logger.V(6); klogV.Enabled() {
+				e.logger.Info("Evented PLEG: Generated pod status from the received event", "podUID", podID, "podStatus", status)
 			} else {
-				klog.V(4).InfoS("Evented PLEG: Generated pod status from the received event", "podUID", podID)
+				e.logger.V(4).Info("Evented PLEG: Generated pod status from the received event", "podUID", podID)
 			}
 			// Preserve the pod IP across cache updates if the new IP is empty.
 			// When a pod is torn down, kubelet may race with PLEG and retrieve
@@ -282,23 +285,23 @@ func (e *EventedPLEG) processCRIEvent(event *runtimeapi.ContainerEventResponse)
 	switch event.ContainerEventType {
 	case runtimeapi.ContainerEventType_CONTAINER_STOPPED_EVENT:
 		e.sendPodLifecycleEvent(&PodLifecycleEvent{ID: types.UID(event.PodSandboxStatus.Metadata.Uid), Type: ContainerDied, Data: event.ContainerId})
-		klog.V(4).InfoS("Received Container Stopped Event", "event", event.String())
+		e.logger.V(4).Info("Received Container Stopped Event", "event", event.String())
 	case runtimeapi.ContainerEventType_CONTAINER_CREATED_EVENT:
 		// We only need to update the pod status on container create.
 		// But we don't have to generate any PodLifeCycleEvent. Container creation related
 		// PodLifeCycleEvent is ignored by the existing Generic PLEG as well.
 		// https://github.com/kubernetes/kubernetes/blob/24753aa8a4df8d10bfd6330e0f29186000c018be/pkg/kubelet/pleg/generic.go#L88 and
 		// https://github.com/kubernetes/kubernetes/blob/24753aa8a4df8d10bfd6330e0f29186000c018be/pkg/kubelet/pleg/generic.go#L273
-		klog.V(4).InfoS("Received Container Created Event", "event", event.String())
+		e.logger.V(4).Info("Received Container Created Event", "event", event.String())
 	case runtimeapi.ContainerEventType_CONTAINER_STARTED_EVENT:
 		e.sendPodLifecycleEvent(&PodLifecycleEvent{ID: types.UID(event.PodSandboxStatus.Metadata.Uid), Type: ContainerStarted, Data: event.ContainerId})
-		klog.V(4).InfoS("Received Container Started Event", "event", event.String())
+		e.logger.V(4).Info("Received Container Started Event", "event", event.String())
 	case runtimeapi.ContainerEventType_CONTAINER_DELETED_EVENT:
 		// In case the pod is deleted it is safe to generate both ContainerDied and ContainerRemoved events, just like in the case of
 		// Generic PLEG. https://github.com/kubernetes/kubernetes/blob/24753aa8a4df8d10bfd6330e0f29186000c018be/pkg/kubelet/pleg/generic.go#L169
 		e.sendPodLifecycleEvent(&PodLifecycleEvent{ID: types.UID(event.PodSandboxStatus.Metadata.Uid), Type: ContainerDied, Data: event.ContainerId})
 		e.sendPodLifecycleEvent(&PodLifecycleEvent{ID: types.UID(event.PodSandboxStatus.Metadata.Uid), Type: ContainerRemoved, Data: event.ContainerId})
-		klog.V(4).InfoS("Received Container Deleted Event", "event", event)
+		e.logger.V(4).Info("Received Container Deleted Event", "event", event)
 	}
 }

@@ -330,7 +333,7 @@ func (e *EventedPLEG) sendPodLifecycleEvent(event *PodLifecycleEvent) {
 	default:
 		// record how many events were discarded due to channel out of capacity
 		metrics.PLEGDiscardEvents.Inc()
-		klog.ErrorS(nil, "Evented PLEG: Event channel is full, discarded pod lifecycle event")
+		e.logger.Error(nil, "Evented PLEG: Event channel is full, discarded pod lifecycle event")
 	}
 }

@@ -356,7 +359,7 @@ func getPodSandboxState(podStatus *kubecontainer.PodStatus) kubecontainer.State
 func (e *EventedPLEG) updateRunningPodMetric(podStatus *kubecontainer.PodStatus) {
 	cachedPodStatus, err := e.cache.Get(podStatus.ID)
 	if err != nil {
-		klog.ErrorS(err, "Evented PLEG: Get cache", "podID", podStatus.ID)
+		e.logger.Error(err, "Evented PLEG: Get cache", "podID", podStatus.ID)
 	}
 	// cache miss condition: The pod status object will have empty state if missed in cache
 	if len(cachedPodStatus.SandboxStatuses) < 1 {
@@ -387,7 +390,7 @@ func getContainerStateCount(podStatus *kubecontainer.PodStatus) map[kubecontaine
 func (e *EventedPLEG) updateRunningContainerMetric(podStatus *kubecontainer.PodStatus) {
 	cachedPodStatus, err := e.cache.Get(podStatus.ID)
 	if err != nil {
-		klog.ErrorS(err, "Evented PLEG: Get cache", "podID", podStatus.ID)
+		e.logger.Error(err, "Evented PLEG: Get cache", "podID", podStatus.ID)
 	}

 	// cache miss condition: The pod status object will have empty state if missed in cache
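
Both PLEGs keep the existing verbosity gate: the expensive "podStatus" value is only attached when level 6 is enabled, now checked on the stored logger instead of on the klog package. A self-contained sketch of that gate, with simplified types and an illustrative message:

package main

import (
	"k8s.io/klog/v2"
)

type criEvent struct {
	podUID string
	status string // stand-in for the potentially large pod status object
}

// logGeneratedStatus mirrors the gate used in processCRIEvents: include the
// verbose "podStatus" key only when -v=6 is active, otherwise log at level 4.
func logGeneratedStatus(logger klog.Logger, ev criEvent) {
	if klogV := logger.V(6); klogV.Enabled() {
		klogV.Info("Generated pod status from the received event", "podUID", ev.podUID, "podStatus", ev.status)
	} else {
		logger.V(4).Info("Generated pod status from the received event", "podUID", ev.podUID)
	}
}

func main() {
	logGeneratedStatus(klog.Background(), criEvent{podUID: "1234", status: "Running"})
}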

pkg/kubelet/pleg/generic.go

Lines changed: 23 additions & 19 deletions
@@ -78,6 +78,8 @@ type GenericPLEG struct {
 	relistDuration *RelistDuration
 	// Mutex to serialize updateCache called by relist vs UpdateCache interface
 	podCacheMutex sync.Mutex
+	// logger is used for contextual logging
+	logger klog.Logger
 }

 // plegContainerState has a one-to-one mapping to the
@@ -116,10 +118,11 @@ type podRecord struct {
 type podRecords map[types.UID]*podRecord

 // NewGenericPLEG instantiates a new GenericPLEG object and return it.
-func NewGenericPLEG(runtime kubecontainer.Runtime, eventChannel chan *PodLifecycleEvent,
+func NewGenericPLEG(logger klog.Logger, runtime kubecontainer.Runtime, eventChannel chan *PodLifecycleEvent,
 	relistDuration *RelistDuration, cache kubecontainer.Cache,
 	clock clock.Clock) PodLifecycleEventGenerator {
 	return &GenericPLEG{
+		logger: logger,
 		relistDuration: relistDuration,
 		runtime: runtime,
 		eventChannel: eventChannel,
@@ -176,12 +179,12 @@ func (g *GenericPLEG) Healthy() (bool, error) {
 	return true, nil
 }

-func generateEvents(podID types.UID, cid string, oldState, newState plegContainerState) []*PodLifecycleEvent {
+func generateEvents(logger klog.Logger, podID types.UID, cid string, oldState, newState plegContainerState) []*PodLifecycleEvent {
 	if newState == oldState {
 		return nil
 	}

-	klog.V(4).InfoS("GenericPLEG", "podUID", podID, "containerID", cid, "oldState", oldState, "newState", newState)
+	logger.V(4).Info("GenericPLEG", "podUID", podID, "containerID", cid, "oldState", oldState, "newState", newState)
 	switch newState {
 	case plegContainerRunning:
 		return []*PodLifecycleEvent{{ID: podID, Type: ContainerStarted, Data: cid}}
@@ -221,7 +224,8 @@ func (g *GenericPLEG) Relist() {
 	defer g.relistLock.Unlock()

 	ctx := context.Background()
-	klog.V(5).InfoS("GenericPLEG: Relisting")
+
+	g.logger.V(5).Info("GenericPLEG: Relisting")

 	if lastRelistTime := g.getRelistTime(); !lastRelistTime.IsZero() {
 		metrics.PLEGRelistInterval.Observe(metrics.SinceInSeconds(lastRelistTime))
@@ -235,7 +239,7 @@ func (g *GenericPLEG) Relist() {
 	// Get all the pods.
 	podList, err := g.runtime.GetPods(ctx, true)
 	if err != nil {
-		klog.ErrorS(err, "GenericPLEG: Unable to retrieve pods")
+		g.logger.Error(err, "GenericPLEG: Unable to retrieve pods")
 		return
 	}

@@ -254,7 +258,7 @@ func (g *GenericPLEG) Relist() {
 		// Get all containers in the old and the new pod.
 		allContainers := getContainersFromPods(oldPod, pod)
 		for _, container := range allContainers {
-			events := computeEvents(oldPod, pod, &container.ID)
+			events := computeEvents(g.logger, oldPod, pod, &container.ID)
 			for _, e := range events {
 				updateEvents(eventsByPodID, e)
 			}
@@ -282,7 +286,7 @@ func (g *GenericPLEG) Relist() {
 			// parallelize if needed.
 			if err, updated := g.updateCache(ctx, pod, pid); err != nil {
 				// Rely on updateCache calling GetPodStatus to log the actual error.
-				klog.V(4).ErrorS(err, "PLEG: Ignoring events for pod", "pod", klog.KRef(pod.Namespace, pod.Name))
+				g.logger.V(4).Error(err, "PLEG: Ignoring events for pod", "pod", klog.KRef(pod.Namespace, pod.Name))

 				// make sure we try to reinspect the pod during the next relisting
 				needsReinspection[pid] = pod
@@ -315,7 +319,7 @@ func (g *GenericPLEG) Relist() {
 			case g.eventChannel <- events[i]:
 			default:
 				metrics.PLEGDiscardEvents.Inc()
-				klog.ErrorS(nil, "Event channel is full, discard this relist() cycle event")
+				g.logger.Error(nil, "Event channel is full, discard this relist() cycle event")
 			}
 			// Log exit code of containers when they finished in a particular event
 			if events[i].Type == ContainerDied {
@@ -331,7 +335,7 @@ func (g *GenericPLEG) Relist() {
 				}
 				if containerID, ok := events[i].Data.(string); ok {
 					if exitCode, ok := containerExitCode[containerID]; ok && pod != nil {
-						klog.V(2).InfoS("Generic (PLEG): container finished", "podID", pod.ID, "containerID", containerID, "exitCode", exitCode)
+						g.logger.V(2).Info("Generic (PLEG): container finished", "podID", pod.ID, "containerID", containerID, "exitCode", exitCode)
 					}
 				}
 			}
@@ -341,11 +345,11 @@ func (g *GenericPLEG) Relist() {
 	if g.cacheEnabled() {
 		// reinspect any pods that failed inspection during the previous relist
 		if len(g.podsToReinspect) > 0 {
-			klog.V(5).InfoS("GenericPLEG: Reinspecting pods that previously failed inspection")
+			g.logger.V(5).Info("GenericPLEG: Reinspecting pods that previously failed inspection")
 			for pid, pod := range g.podsToReinspect {
 				if err, _ := g.updateCache(ctx, pod, pid); err != nil {
 					// Rely on updateCache calling GetPodStatus to log the actual error.
-					klog.V(5).ErrorS(err, "PLEG: pod failed reinspection", "pod", klog.KRef(pod.Namespace, pod.Name))
+					g.logger.V(5).Error(err, "PLEG: pod failed reinspection", "pod", klog.KRef(pod.Namespace, pod.Name))
 					needsReinspection[pid] = pod
 				}
 			}
@@ -386,7 +390,7 @@ func getContainersFromPods(pods ...*kubecontainer.Pod) []*kubecontainer.Containe
 	return containers
 }

-func computeEvents(oldPod, newPod *kubecontainer.Pod, cid *kubecontainer.ContainerID) []*PodLifecycleEvent {
+func computeEvents(logger klog.Logger, oldPod, newPod *kubecontainer.Pod, cid *kubecontainer.ContainerID) []*PodLifecycleEvent {
 	var pid types.UID
 	if oldPod != nil {
 		pid = oldPod.ID
@@ -395,7 +399,7 @@ func computeEvents(oldPod, newPod *kubecontainer.Pod, cid *kubecontainer.Contain
 	}
 	oldState := getContainerState(oldPod, cid)
 	newState := getContainerState(newPod, cid)
-	return generateEvents(pid, cid.ID, oldState, newState)
+	return generateEvents(logger, pid, cid.ID, oldState, newState)
 }

 func (g *GenericPLEG) cacheEnabled() bool {
@@ -433,7 +437,7 @@ func (g *GenericPLEG) updateCache(ctx context.Context, pod *kubecontainer.Pod, p
 	if pod == nil {
 		// The pod is missing in the current relist. This means that
 		// the pod has no visible (active or inactive) containers.
-		klog.V(4).InfoS("PLEG: Delete status for pod", "podUID", string(pid))
+		g.logger.V(4).Info("PLEG: Delete status for pod", "podUID", string(pid))
 		g.cache.Delete(pid)
 		return nil, true
 	}
@@ -448,15 +452,15 @@ func (g *GenericPLEG) updateCache(ctx context.Context, pod *kubecontainer.Pod, p
 		// if branch is okay, we just use it to determine whether the
 		// additional "podStatus" key and its value should be added.
 		if klog.V(6).Enabled() {
-			klog.ErrorS(err, "PLEG: Write status", "pod", klog.KRef(pod.Namespace, pod.Name), "podStatus", status)
+			g.logger.Error(err, "PLEG: Write status", "pod", klog.KRef(pod.Namespace, pod.Name), "podStatus", status)
 		} else {
-			klog.ErrorS(err, "PLEG: Write status", "pod", klog.KRef(pod.Namespace, pod.Name))
+			g.logger.Error(err, "PLEG: Write status", "pod", klog.KRef(pod.Namespace, pod.Name))
 		}
 	} else {
-		if klogV := klog.V(6); klogV.Enabled() {
-			klogV.InfoS("PLEG: Write status", "pod", klog.KRef(pod.Namespace, pod.Name), "podStatus", status)
+		if klogV := g.logger.V(6); klogV.Enabled() {
+			g.logger.Info("PLEG: Write status", "pod", klog.KRef(pod.Namespace, pod.Name), "podStatus", status)
 		} else {
-			klog.V(4).InfoS("PLEG: Write status", "pod", klog.KRef(pod.Namespace, pod.Name))
+			g.logger.V(4).Info("PLEG: Write status", "pod", klog.KRef(pod.Namespace, pod.Name))
 		}
 		// Preserve the pod IP across cache updates if the new IP is empty.
 		// When a pod is torn down, kubelet may race with PLEG and retrieve