@@ -1809,7 +1809,7 @@ func (kl *Kubelet) Run(updates <-chan kubetypes.PodUpdate) {
 	}

 	// Start component sync loops.
-	kl.statusManager.Start()
+	kl.statusManager.Start(ctx)

 	// Start syncing RuntimeClasses if enabled.
 	if kl.runtimeClassManager != nil {
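The hunk above hands the kubelet's run context to the status manager's Start. A minimal sketch of what that enables, assuming k8s.io/klog/v2; demoStatusManager is a hypothetical stand-in, not the real kubelet type. A component that receives the context can derive the caller's contextual logger for its background goroutine instead of falling back to global klog state:

```go
package main

import (
	"context"
	"time"

	"k8s.io/klog/v2"
)

// demoStatusManager is a hypothetical stand-in for the kubelet's status
// manager: Start takes a context so the sync goroutine can log through
// whatever logger the caller attached to that context.
type demoStatusManager struct{}

func (m *demoStatusManager) Start(ctx context.Context) {
	logger := klog.FromContext(ctx) // contextual logger, not the global one
	go func() {
		ticker := time.NewTicker(time.Second)
		defer ticker.Stop()
		for {
			select {
			case <-ctx.Done():
				return
			case <-ticker.C:
				logger.V(4).Info("Syncing pod statuses")
			}
		}
	}()
}

func main() {
	// Attach the process-wide logger to the context once, at the top.
	ctx := klog.NewContext(context.Background(), klog.Background())
	(&demoStatusManager{}).Start(ctx)
	time.Sleep(2 * time.Second)
}
```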
@@ -1888,6 +1888,7 @@ func (kl *Kubelet) SyncPod(ctx context.Context, updateType kubetypes.SyncPodType
 		attribute.String("k8s.pod.update_type", updateType.String()),
 		semconv.K8SNamespaceNameKey.String(pod.Namespace),
 	))
+	logger := klog.FromContext(ctx)
 	klog.V(4).InfoS("SyncPod enter", "pod", klog.KObj(pod), "podUID", pod.UID)
 	defer func() {
 		if err != nil {
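SyncPod derives logger once from the span-carrying context and then passes it explicitly to each SetPodStatus call in the hunks that follow. A rough sketch of that parameter-threading style, assuming k8s.io/klog/v2; syncPod and setPodStatus below are hypothetical simplifications, not the real kubelet signatures:

```go
package main

import (
	"context"

	"k8s.io/klog/v2"
)

// setPodStatus mirrors the new SetPodStatus shape: the logger arrives as an
// explicit first argument rather than being pulled from a global.
func setPodStatus(logger klog.Logger, pod, phase string) {
	logger.V(4).Info("Setting pod status", "pod", pod, "phase", phase)
}

func syncPod(ctx context.Context, pod string) {
	// Derive the logger once at the top of the sync...
	logger := klog.FromContext(ctx)
	// ...and reuse it for every status update in this sync pass.
	setPodStatus(logger, pod, "Running")
	setPodStatus(logger, pod, "Succeeded")
}

func main() {
	syncPod(klog.NewContext(context.Background(), klog.Background()), "nginx-0")
}
```

Passing the logger explicitly keeps whatever key/value pairs the caller attached on every status write, without each callee re-deriving it from a context.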
@@ -1948,7 +1949,7 @@ func (kl *Kubelet) SyncPod(ctx context.Context, updateType kubetypes.SyncPodType

 	// If the pod is terminal, we don't need to continue to setup the pod
 	if apiPodStatus.Phase == v1.PodSucceeded || apiPodStatus.Phase == v1.PodFailed {
-		kl.statusManager.SetPodStatus(pod, apiPodStatus)
+		kl.statusManager.SetPodStatus(logger, pod, apiPodStatus)
 		isTerminal = true
 		return isTerminal, nil
 	}
@@ -1961,7 +1962,7 @@ func (kl *Kubelet) SyncPod(ctx context.Context, updateType kubetypes.SyncPodType
 		metrics.PodStartDuration.Observe(metrics.SinceInSeconds(firstSeenTime))
 	}

-	kl.statusManager.SetPodStatus(pod, apiPodStatus)
+	kl.statusManager.SetPodStatus(logger, pod, apiPodStatus)

 	// If the network plugin is not ready, only start the pod if it uses the host network
 	if err := kl.runtimeState.networkErrors(); err != nil && !kubecontainer.IsHostNetworkPod(pod) {
@@ -2099,6 +2100,7 @@ func (kl *Kubelet) SyncTerminatingPod(_ context.Context, pod *v1.Pod, podStatus
 		semconv.K8SPodNameKey.String(pod.Name),
 		semconv.K8SNamespaceNameKey.String(pod.Namespace),
 	))
+	logger := klog.FromContext(ctx)
 	defer otelSpan.End()
 	klog.V(4).InfoS("SyncTerminatingPod enter", "pod", klog.KObj(pod), "podUID", pod.UID)
 	defer klog.V(4).InfoS("SyncTerminatingPod exit", "pod", klog.KObj(pod), "podUID", pod.UID)
@@ -2112,7 +2114,7 @@ func (kl *Kubelet) SyncTerminatingPod(_ context.Context, pod *v1.Pod, podStatus
 	if podStatusFn != nil {
 		podStatusFn(&apiPodStatus)
 	}
-	kl.statusManager.SetPodStatus(pod, apiPodStatus)
+	kl.statusManager.SetPodStatus(logger, pod, apiPodStatus)

 	if gracePeriod != nil {
 		klog.V(4).InfoS("Pod terminating with grace period", "pod", klog.KObj(pod), "podUID", pod.UID, "gracePeriod", *gracePeriod)
@@ -2187,7 +2189,7 @@ func (kl *Kubelet) SyncTerminatingPod(_ context.Context, pod *v1.Pod, podStatus
 	// information about the container end states (including exit codes) - when
 	// SyncTerminatedPod is called the containers may already be removed.
 	apiPodStatus = kl.generateAPIPodStatus(pod, stoppedPodStatus, true)
-	kl.statusManager.SetPodStatus(pod, apiPodStatus)
+	kl.statusManager.SetPodStatus(logger, pod, apiPodStatus)

 	// we have successfully stopped all containers, the pod is terminating, our status is "done"
 	klog.V(4).InfoS("Pod termination stopped all running containers", "pod", klog.KObj(pod), "podUID", pod.UID)
@@ -2249,6 +2251,7 @@ func (kl *Kubelet) SyncTerminatedPod(ctx context.Context, pod *v1.Pod, podStatus
 		semconv.K8SPodNameKey.String(pod.Name),
 		semconv.K8SNamespaceNameKey.String(pod.Namespace),
 	))
+	logger := klog.FromContext(ctx)
 	defer otelSpan.End()
 	klog.V(4).InfoS("SyncTerminatedPod enter", "pod", klog.KObj(pod), "podUID", pod.UID)
 	defer klog.V(4).InfoS("SyncTerminatedPod exit", "pod", klog.KObj(pod), "podUID", pod.UID)
@@ -2262,7 +2265,7 @@ func (kl *Kubelet) SyncTerminatedPod(ctx context.Context, pod *v1.Pod, podStatus
 	// TODO: should we simply fold this into TerminatePod? that would give a single pod update
 	apiPodStatus := kl.generateAPIPodStatus(pod, podStatus, true)

-	kl.statusManager.SetPodStatus(pod, apiPodStatus)
+	kl.statusManager.SetPodStatus(logger, pod, apiPodStatus)

 	// volumes are unmounted after the pod worker reports ShouldPodRuntimeBeRemoved (which is satisfied
 	// before syncTerminatedPod is invoked)
@@ -2309,7 +2312,7 @@ func (kl *Kubelet) SyncTerminatedPod(ctx context.Context, pod *v1.Pod, podStatus
 	kl.usernsManager.Release(pod.UID)

 	// mark the final pod status
-	kl.statusManager.TerminatePod(pod)
+	kl.statusManager.TerminatePod(logger, pod)
 	klog.V(4).InfoS("Pod is terminated and will need no more status updates", "pod", klog.KObj(pod), "podUID", pod.UID)

 	return nil
@@ -2379,7 +2382,7 @@ func (kl *Kubelet) deletePod(pod *v1.Pod) error {
 // and updates the pod to the failed phase in the status manager.
 func (kl *Kubelet) rejectPod(pod *v1.Pod, reason, message string) {
 	kl.recorder.Eventf(pod, v1.EventTypeWarning, reason, message)
-	kl.statusManager.SetPodStatus(pod, v1.PodStatus{
+	kl.statusManager.SetPodStatus(klog.TODO(), pod, v1.PodStatus{
 		QOSClass: v1qos.GetPodQOS(pod), // keep it as is
 		Phase:    v1.PodFailed,
 		Reason:   reason,
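rejectPod has no context parameter, so the hunk above passes klog.TODO() instead of a context-derived logger. As far as the klog/v2 API goes, TODO() yields the same fallback logger as Background() while flagging the call site as one that still needs a proper context; a tiny sketch under that assumption, with a hypothetical recordRejection helper:

```go
package main

import "k8s.io/klog/v2"

// recordRejection is a hypothetical callee that, like SetPodStatus after
// this change, expects the logger as an explicit argument.
func recordRejection(logger klog.Logger, pod, reason string) {
	logger.Info("Pod rejected", "pod", pod, "reason", reason)
}

func main() {
	// Equivalent to klog.Background() today, but greppable as unfinished
	// contextual-logging migration work.
	recordRejection(klog.TODO(), "nginx-0", "OutOfcpu")
}
```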
@@ -2507,6 +2510,7 @@ func (kl *Kubelet) syncLoop(ctx context.Context, updates <-chan kubetypes.PodUpd
 // containers have failed health checks
 func (kl *Kubelet) syncLoopIteration(ctx context.Context, configCh <-chan kubetypes.PodUpdate, handler SyncHandler,
 	syncCh <-chan time.Time, housekeepingCh <-chan time.Time, plegCh <-chan *pleg.PodLifecycleEvent) bool {
+	logger := klog.FromContext(ctx)
 	select {
 	case u, open := <-configCh:
 		// Update from a config source; dispatch it to the right handler
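syncLoopIteration derives logger at the top of the function, so the readiness and startup cases in the next two hunks can hand one shared logger to the status setters. A hedged sketch of that shape, with hypothetical channel and handler names rather than the kubelet's real ones:

```go
package main

import (
	"context"
	"time"

	"k8s.io/klog/v2"
)

// setReadiness stands in for statusManager.SetContainerReadiness.
func setReadiness(logger klog.Logger, container string, ready bool) {
	logger.V(4).Info("Readiness changed", "container", container, "ready", ready)
}

func loopIteration(ctx context.Context, readinessCh <-chan bool) bool {
	// One logger per iteration; every select case below shares it.
	logger := klog.FromContext(ctx)
	select {
	case <-ctx.Done():
		return false
	case ready := <-readinessCh:
		setReadiness(logger, "app", ready)
	case <-time.After(time.Second):
		logger.V(4).Info("Sync tick")
	}
	return true
}

func main() {
	ctx := klog.NewContext(context.Background(), klog.Background())
	ch := make(chan bool, 1)
	ch <- true
	loopIteration(ctx, ch)
}
```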
@@ -2577,7 +2581,7 @@ func (kl *Kubelet) syncLoopIteration(ctx context.Context, configCh <-chan kubety
 		}
 	case update := <-kl.readinessManager.Updates():
 		ready := update.Result == proberesults.Success
-		kl.statusManager.SetContainerReadiness(update.PodUID, update.ContainerID, ready)
+		kl.statusManager.SetContainerReadiness(logger, update.PodUID, update.ContainerID, ready)

 		status := "not ready"
 		if ready {
@@ -2586,7 +2590,7 @@ func (kl *Kubelet) syncLoopIteration(ctx context.Context, configCh <-chan kubety
 		handleProbeSync(kl, update, handler, "readiness", status)
 	case update := <-kl.startupManager.Updates():
 		started := update.Result == proberesults.Success
-		kl.statusManager.SetContainerStartup(update.PodUID, update.ContainerID, started)
+		kl.statusManager.SetContainerStartup(logger, update.PodUID, update.ContainerID, started)

 		status := "unhealthy"
 		if started {