@@ -1810,7 +1810,7 @@ func (kl *Kubelet) Run(updates <-chan kubetypes.PodUpdate) {
 	}
 
 	// Start component sync loops.
-	kl.statusManager.Start()
+	kl.statusManager.Start(ctx)
 
 	// Start syncing RuntimeClasses if enabled.
 	if kl.runtimeClassManager != nil {
@@ -1889,6 +1889,7 @@ func (kl *Kubelet) SyncPod(ctx context.Context, updateType kubetypes.SyncPodType
 		attribute.String("k8s.pod.update_type", updateType.String()),
 		semconv.K8SNamespaceNameKey.String(pod.Namespace),
 	))
+	logger := klog.FromContext(ctx)
 	klog.V(4).InfoS("SyncPod enter", "pod", klog.KObj(pod), "podUID", pod.UID)
 	defer func() {
 		if err != nil {
@@ -1949,7 +1950,7 @@ func (kl *Kubelet) SyncPod(ctx context.Context, updateType kubetypes.SyncPodType
 
 	// If the pod is terminal, we don't need to continue to setup the pod
 	if apiPodStatus.Phase == v1.PodSucceeded || apiPodStatus.Phase == v1.PodFailed {
-		kl.statusManager.SetPodStatus(pod, apiPodStatus)
+		kl.statusManager.SetPodStatus(logger, pod, apiPodStatus)
 		isTerminal = true
 		return isTerminal, nil
 	}
@@ -1962,7 +1963,7 @@ func (kl *Kubelet) SyncPod(ctx context.Context, updateType kubetypes.SyncPodType
 		metrics.PodStartDuration.Observe(metrics.SinceInSeconds(firstSeenTime))
 	}
 
-	kl.statusManager.SetPodStatus(pod, apiPodStatus)
+	kl.statusManager.SetPodStatus(logger, pod, apiPodStatus)
 
 	// If the network plugin is not ready, only start the pod if it uses the host network
 	if err := kl.runtimeState.networkErrors(); err != nil && !kubecontainer.IsHostNetworkPod(pod) {
@@ -2100,6 +2101,7 @@ func (kl *Kubelet) SyncTerminatingPod(_ context.Context, pod *v1.Pod, podStatus
 		semconv.K8SPodNameKey.String(pod.Name),
 		semconv.K8SNamespaceNameKey.String(pod.Namespace),
 	))
+	logger := klog.FromContext(ctx)
 	defer otelSpan.End()
 	klog.V(4).InfoS("SyncTerminatingPod enter", "pod", klog.KObj(pod), "podUID", pod.UID)
 	defer klog.V(4).InfoS("SyncTerminatingPod exit", "pod", klog.KObj(pod), "podUID", pod.UID)
@@ -2113,7 +2115,7 @@ func (kl *Kubelet) SyncTerminatingPod(_ context.Context, pod *v1.Pod, podStatus
 	if podStatusFn != nil {
 		podStatusFn(&apiPodStatus)
 	}
-	kl.statusManager.SetPodStatus(pod, apiPodStatus)
+	kl.statusManager.SetPodStatus(logger, pod, apiPodStatus)
 
 	if gracePeriod != nil {
 		klog.V(4).InfoS("Pod terminating with grace period", "pod", klog.KObj(pod), "podUID", pod.UID, "gracePeriod", *gracePeriod)
@@ -2188,7 +2190,7 @@ func (kl *Kubelet) SyncTerminatingPod(_ context.Context, pod *v1.Pod, podStatus
 	// information about the container end states (including exit codes) - when
 	// SyncTerminatedPod is called the containers may already be removed.
 	apiPodStatus = kl.generateAPIPodStatus(pod, stoppedPodStatus, true)
-	kl.statusManager.SetPodStatus(pod, apiPodStatus)
+	kl.statusManager.SetPodStatus(logger, pod, apiPodStatus)
 
 	// we have successfully stopped all containers, the pod is terminating, our status is "done"
 	klog.V(4).InfoS("Pod termination stopped all running containers", "pod", klog.KObj(pod), "podUID", pod.UID)
@@ -2250,6 +2252,7 @@ func (kl *Kubelet) SyncTerminatedPod(ctx context.Context, pod *v1.Pod, podStatus
 		semconv.K8SPodNameKey.String(pod.Name),
 		semconv.K8SNamespaceNameKey.String(pod.Namespace),
 	))
+	logger := klog.FromContext(ctx)
 	defer otelSpan.End()
 	klog.V(4).InfoS("SyncTerminatedPod enter", "pod", klog.KObj(pod), "podUID", pod.UID)
 	defer klog.V(4).InfoS("SyncTerminatedPod exit", "pod", klog.KObj(pod), "podUID", pod.UID)
@@ -2263,7 +2266,7 @@ func (kl *Kubelet) SyncTerminatedPod(ctx context.Context, pod *v1.Pod, podStatus
 	// TODO: should we simply fold this into TerminatePod? that would give a single pod update
 	apiPodStatus := kl.generateAPIPodStatus(pod, podStatus, true)
 
-	kl.statusManager.SetPodStatus(pod, apiPodStatus)
+	kl.statusManager.SetPodStatus(logger, pod, apiPodStatus)
 
 	// volumes are unmounted after the pod worker reports ShouldPodRuntimeBeRemoved (which is satisfied
 	// before syncTerminatedPod is invoked)
@@ -2310,7 +2313,7 @@ func (kl *Kubelet) SyncTerminatedPod(ctx context.Context, pod *v1.Pod, podStatus
 	kl.usernsManager.Release(pod.UID)
 
 	// mark the final pod status
-	kl.statusManager.TerminatePod(pod)
+	kl.statusManager.TerminatePod(logger, pod)
 	klog.V(4).InfoS("Pod is terminated and will need no more status updates", "pod", klog.KObj(pod), "podUID", pod.UID)
 
 	return nil
@@ -2380,7 +2383,7 @@ func (kl *Kubelet) deletePod(pod *v1.Pod) error {
 // and updates the pod to the failed phase in the status manager.
 func (kl *Kubelet) rejectPod(pod *v1.Pod, reason, message string) {
 	kl.recorder.Eventf(pod, v1.EventTypeWarning, reason, message)
-	kl.statusManager.SetPodStatus(pod, v1.PodStatus{
+	kl.statusManager.SetPodStatus(klog.TODO(), pod, v1.PodStatus{
 		QOSClass: v1qos.GetPodQOS(pod), // keep it as is
 		Phase:    v1.PodFailed,
 		Reason:   reason,
@@ -2508,6 +2511,7 @@ func (kl *Kubelet) syncLoop(ctx context.Context, updates <-chan kubetypes.PodUpd
 // containers have failed health checks
 func (kl *Kubelet) syncLoopIteration(ctx context.Context, configCh <-chan kubetypes.PodUpdate, handler SyncHandler,
 	syncCh <-chan time.Time, housekeepingCh <-chan time.Time, plegCh <-chan *pleg.PodLifecycleEvent) bool {
+	logger := klog.FromContext(ctx)
 	select {
 	case u, open := <-configCh:
 		// Update from a config source; dispatch it to the right handler
@@ -2578,7 +2582,7 @@ func (kl *Kubelet) syncLoopIteration(ctx context.Context, configCh <-chan kubety
 		}
 	case update := <-kl.readinessManager.Updates():
 		ready := update.Result == proberesults.Success
-		kl.statusManager.SetContainerReadiness(update.PodUID, update.ContainerID, ready)
+		kl.statusManager.SetContainerReadiness(logger, update.PodUID, update.ContainerID, ready)
 
 		status := "not ready"
 		if ready {
@@ -2587,7 +2591,7 @@ func (kl *Kubelet) syncLoopIteration(ctx context.Context, configCh <-chan kubety
 		handleProbeSync(kl, update, handler, "readiness", status)
 	case update := <-kl.startupManager.Updates():
 		started := update.Result == proberesults.Success
-		kl.statusManager.SetContainerStartup(update.PodUID, update.ContainerID, started)
+		kl.statusManager.SetContainerStartup(logger, update.PodUID, update.ContainerID, started)
 
 		status := "unhealthy"
 		if started {
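For reference, the pattern this diff applies is Kubernetes' contextual logging migration (KEP-3077): a logr-based logger travels in the context.Context, is retrieved with klog.FromContext, and is passed explicitly to callees instead of being pulled from global klog state. Below is a minimal, self-contained sketch of that flow; setPodStatus and syncPod are hypothetical stand-ins for the kubelet methods touched above, not the real API.

package main

import (
	"context"

	"k8s.io/klog/v2"
)

// setPodStatus stands in for statusManager.SetPodStatus: it receives the
// logger explicitly rather than reading global klog state.
func setPodStatus(logger klog.Logger, podName string) {
	logger.V(4).Info("Updating pod status", "pod", podName)
}

// syncPod stands in for Kubelet.SyncPod: it recovers the logger from the
// context once and hands it to every callee that needs it.
func syncPod(ctx context.Context, podName string) {
	logger := klog.FromContext(ctx) // falls back to the global logger if none is stored
	setPodStatus(logger, podName)
}

func main() {
	// Attach a logger to the context once, near the top of the call chain.
	ctx := klog.NewContext(context.Background(), klog.Background())
	syncPod(ctx, "nginx")

	// Call sites that have no context yet use klog.TODO(), as rejectPod
	// does in the diff above, marking them for a later migration pass.
	setPodStatus(klog.TODO(), "nginx")
}

Note that in this change only the statusManager calls switch to the passed-in logger; the surrounding klog.V(4).InfoS calls still go through the global logger and would presumably be migrated separately.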