
Commit 9d1363e

Merge pull request kubernetes#130852 from Chulong-Li/contextual-logging
Migrate pkg/kubelet/status to contextual logging
2 parents: c2d40b7 + d868eeb

12 files changed: +307 −247 lines

hack/golangci-hints.yaml

Lines changed: 1 addition & 0 deletions
@@ -177,6 +177,7 @@ linters-settings: # please keep this alphabetized
 contextual k8s.io/kubernetes/pkg/kubelet/token/.*
 contextual k8s.io/kubernetes/pkg/kubelet/cadvisor/.*
 contextual k8s.io/kubernetes/pkg/kubelet/oom/.*
+contextual k8s.io/kubernetes/pkg/kubelet/status/.*
 contextual k8s.io/kubernetes/pkg/kubelet/sysctl/.*
 
 # As long as contextual logging is alpha or beta, all WithName, WithValues,

hack/golangci.yaml

Lines changed: 1 addition & 0 deletions
@@ -243,6 +243,7 @@ linters-settings: # please keep this alphabetized
 contextual k8s.io/kubernetes/pkg/kubelet/token/.*
 contextual k8s.io/kubernetes/pkg/kubelet/cadvisor/.*
 contextual k8s.io/kubernetes/pkg/kubelet/oom/.*
+contextual k8s.io/kubernetes/pkg/kubelet/status/.*
 contextual k8s.io/kubernetes/pkg/kubelet/sysctl/.*
 
 # As long as contextual logging is alpha or beta, all WithName, WithValues,

hack/logcheck.conf

Lines changed: 1 addition & 0 deletions
@@ -55,6 +55,7 @@ contextual k8s.io/kubernetes/pkg/kubelet/clustertrustbundle/.*
 contextual k8s.io/kubernetes/pkg/kubelet/token/.*
 contextual k8s.io/kubernetes/pkg/kubelet/cadvisor/.*
 contextual k8s.io/kubernetes/pkg/kubelet/oom/.*
+contextual k8s.io/kubernetes/pkg/kubelet/status/.*
 contextual k8s.io/kubernetes/pkg/kubelet/sysctl/.*
 
 # As long as contextual logging is alpha or beta, all WithName, WithValues,
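
For background: the contextual directive tells the logcheck linter that the matched packages must use contextual logging. Direct calls to the global klog API (klog.InfoS and friends) are flagged there; code is expected to log through a klog.Logger handed in by the caller or carried in a context. A minimal sketch of the difference, with illustrative function names that are not part of this PR:

package status

import (
	"context"

	"k8s.io/klog/v2"
)

// Logs through the global klog backend. In a package listed as
// "contextual" in logcheck.conf, logcheck would flag this call.
func syncBatchLegacy() {
	klog.V(4).InfoS("Syncing pod statuses")
}

// Contextual variant: the logger comes from the caller's context, so
// names, key/value pairs, and per-test output routing are preserved.
func syncBatch(ctx context.Context) {
	logger := klog.FromContext(ctx)
	logger.V(4).Info("Syncing pod statuses")
}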

pkg/kubelet/kubelet.go

Lines changed: 14 additions & 10 deletions
@@ -1810,7 +1810,7 @@ func (kl *Kubelet) Run(updates <-chan kubetypes.PodUpdate) {
 	}
 
 	// Start component sync loops.
-	kl.statusManager.Start()
+	kl.statusManager.Start(ctx)
 
 	// Start syncing RuntimeClasses if enabled.
 	if kl.runtimeClassManager != nil {
@@ -1889,6 +1889,7 @@ func (kl *Kubelet) SyncPod(ctx context.Context, updateType kubetypes.SyncPodType
 		attribute.String("k8s.pod.update_type", updateType.String()),
 		semconv.K8SNamespaceNameKey.String(pod.Namespace),
 	))
+	logger := klog.FromContext(ctx)
 	klog.V(4).InfoS("SyncPod enter", "pod", klog.KObj(pod), "podUID", pod.UID)
 	defer func() {
 		if err != nil {
@@ -1949,7 +1950,7 @@ func (kl *Kubelet) SyncPod(ctx context.Context, updateType kubetypes.SyncPodType
 
 	// If the pod is terminal, we don't need to continue to setup the pod
 	if apiPodStatus.Phase == v1.PodSucceeded || apiPodStatus.Phase == v1.PodFailed {
-		kl.statusManager.SetPodStatus(pod, apiPodStatus)
+		kl.statusManager.SetPodStatus(logger, pod, apiPodStatus)
 		isTerminal = true
 		return isTerminal, nil
 	}
@@ -1962,7 +1963,7 @@ func (kl *Kubelet) SyncPod(ctx context.Context, updateType kubetypes.SyncPodType
 		metrics.PodStartDuration.Observe(metrics.SinceInSeconds(firstSeenTime))
 	}
 
-	kl.statusManager.SetPodStatus(pod, apiPodStatus)
+	kl.statusManager.SetPodStatus(logger, pod, apiPodStatus)
 
 	// If the network plugin is not ready, only start the pod if it uses the host network
 	if err := kl.runtimeState.networkErrors(); err != nil && !kubecontainer.IsHostNetworkPod(pod) {
@@ -2100,6 +2101,7 @@ func (kl *Kubelet) SyncTerminatingPod(_ context.Context, pod *v1.Pod, podStatus
 		semconv.K8SPodNameKey.String(pod.Name),
 		semconv.K8SNamespaceNameKey.String(pod.Namespace),
 	))
+	logger := klog.FromContext(ctx)
 	defer otelSpan.End()
 	klog.V(4).InfoS("SyncTerminatingPod enter", "pod", klog.KObj(pod), "podUID", pod.UID)
 	defer klog.V(4).InfoS("SyncTerminatingPod exit", "pod", klog.KObj(pod), "podUID", pod.UID)
@@ -2113,7 +2115,7 @@ func (kl *Kubelet) SyncTerminatingPod(_ context.Context, pod *v1.Pod, podStatus
 	if podStatusFn != nil {
 		podStatusFn(&apiPodStatus)
 	}
-	kl.statusManager.SetPodStatus(pod, apiPodStatus)
+	kl.statusManager.SetPodStatus(logger, pod, apiPodStatus)
 
 	if gracePeriod != nil {
 		klog.V(4).InfoS("Pod terminating with grace period", "pod", klog.KObj(pod), "podUID", pod.UID, "gracePeriod", *gracePeriod)
@@ -2188,7 +2190,7 @@ func (kl *Kubelet) SyncTerminatingPod(_ context.Context, pod *v1.Pod, podStatus
 	// information about the container end states (including exit codes) - when
 	// SyncTerminatedPod is called the containers may already be removed.
 	apiPodStatus = kl.generateAPIPodStatus(pod, stoppedPodStatus, true)
-	kl.statusManager.SetPodStatus(pod, apiPodStatus)
+	kl.statusManager.SetPodStatus(logger, pod, apiPodStatus)
 
 	// we have successfully stopped all containers, the pod is terminating, our status is "done"
 	klog.V(4).InfoS("Pod termination stopped all running containers", "pod", klog.KObj(pod), "podUID", pod.UID)
@@ -2250,6 +2252,7 @@ func (kl *Kubelet) SyncTerminatedPod(ctx context.Context, pod *v1.Pod, podStatus
 		semconv.K8SPodNameKey.String(pod.Name),
 		semconv.K8SNamespaceNameKey.String(pod.Namespace),
 	))
+	logger := klog.FromContext(ctx)
 	defer otelSpan.End()
 	klog.V(4).InfoS("SyncTerminatedPod enter", "pod", klog.KObj(pod), "podUID", pod.UID)
 	defer klog.V(4).InfoS("SyncTerminatedPod exit", "pod", klog.KObj(pod), "podUID", pod.UID)
@@ -2263,7 +2266,7 @@ func (kl *Kubelet) SyncTerminatedPod(ctx context.Context, pod *v1.Pod, podStatus
 	// TODO: should we simply fold this into TerminatePod? that would give a single pod update
 	apiPodStatus := kl.generateAPIPodStatus(pod, podStatus, true)
 
-	kl.statusManager.SetPodStatus(pod, apiPodStatus)
+	kl.statusManager.SetPodStatus(logger, pod, apiPodStatus)
 
 	// volumes are unmounted after the pod worker reports ShouldPodRuntimeBeRemoved (which is satisfied
 	// before syncTerminatedPod is invoked)
@@ -2310,7 +2313,7 @@ func (kl *Kubelet) SyncTerminatedPod(ctx context.Context, pod *v1.Pod, podStatus
 	kl.usernsManager.Release(pod.UID)
 
 	// mark the final pod status
-	kl.statusManager.TerminatePod(pod)
+	kl.statusManager.TerminatePod(logger, pod)
 	klog.V(4).InfoS("Pod is terminated and will need no more status updates", "pod", klog.KObj(pod), "podUID", pod.UID)
 
 	return nil
@@ -2380,7 +2383,7 @@ func (kl *Kubelet) deletePod(pod *v1.Pod) error {
 // and updates the pod to the failed phase in the status manager.
 func (kl *Kubelet) rejectPod(pod *v1.Pod, reason, message string) {
 	kl.recorder.Eventf(pod, v1.EventTypeWarning, reason, message)
-	kl.statusManager.SetPodStatus(pod, v1.PodStatus{
+	kl.statusManager.SetPodStatus(klog.TODO(), pod, v1.PodStatus{
 		QOSClass: v1qos.GetPodQOS(pod), // keep it as is
 		Phase:    v1.PodFailed,
 		Reason:   reason,
@@ -2508,6 +2511,7 @@ func (kl *Kubelet) syncLoop(ctx context.Context, updates <-chan kubetypes.PodUpd
 // containers have failed health checks
 func (kl *Kubelet) syncLoopIteration(ctx context.Context, configCh <-chan kubetypes.PodUpdate, handler SyncHandler,
 	syncCh <-chan time.Time, housekeepingCh <-chan time.Time, plegCh <-chan *pleg.PodLifecycleEvent) bool {
+	logger := klog.FromContext(ctx)
 	select {
 	case u, open := <-configCh:
 		// Update from a config source; dispatch it to the right handler
@@ -2578,7 +2582,7 @@ func (kl *Kubelet) syncLoopIteration(ctx context.Context, configCh <-chan kubety
 		}
 	case update := <-kl.readinessManager.Updates():
 		ready := update.Result == proberesults.Success
-		kl.statusManager.SetContainerReadiness(update.PodUID, update.ContainerID, ready)
+		kl.statusManager.SetContainerReadiness(logger, update.PodUID, update.ContainerID, ready)
 
 		status := "not ready"
 		if ready {
@@ -2587,7 +2591,7 @@ func (kl *Kubelet) syncLoopIteration(ctx context.Context, configCh <-chan kubety
 		handleProbeSync(kl, update, handler, "readiness", status)
 	case update := <-kl.startupManager.Updates():
 		started := update.Result == proberesults.Success
-		kl.statusManager.SetContainerStartup(update.PodUID, update.ContainerID, started)
+		kl.statusManager.SetContainerStartup(logger, update.PodUID, update.ContainerID, started)
 
 		status := "unhealthy"
 		if started {
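
All of the kubelet.go changes above follow one pattern: derive a logger once per sync function via klog.FromContext(ctx), then pass it explicitly to the status-manager methods, whose signatures gained a leading klog.Logger parameter. A self-contained sketch of that flow (statusManager here is a stand-in type, not the kubelet's real interface):

package main

import (
	"context"

	"k8s.io/klog/v2"
)

type statusManager struct{}

// Migrated methods take the logger as their first argument, mirroring
// SetPodStatus(logger, pod, apiPodStatus) in the diff above.
func (m *statusManager) SetPodStatus(logger klog.Logger, pod, phase string) {
	logger.V(4).Info("Set pod status", "pod", pod, "phase", phase)
}

func syncPod(ctx context.Context, m *statusManager) {
	logger := klog.FromContext(ctx) // the call SyncPod gains in this PR
	m.SetPodStatus(logger, "mypod", "Running")
}

func main() {
	// The caller decides which logger the whole call chain uses.
	ctx := klog.NewContext(context.Background(), klog.Background())
	syncPod(ctx, &statusManager{})
}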

pkg/kubelet/kubelet_pods.go

Lines changed: 1 addition & 1 deletion
@@ -1113,7 +1113,7 @@ func (kl *Kubelet) removeOrphanedPodStatuses(pods []*v1.Pod, mirrorPods []*v1.Po
 	for _, pod := range mirrorPods {
 		podUIDs[pod.UID] = true
 	}
-	kl.statusManager.RemoveOrphanedStatuses(podUIDs)
+	kl.statusManager.RemoveOrphanedStatuses(klog.TODO(), podUIDs)
 }
 
 // HandlePodCleanups performs a series of cleanup work, including terminating
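
Note on klog.TODO(): like context.TODO(), it is a placeholder for call sites that cannot yet obtain a logger from their caller; it falls back to the global logger while keeping the remaining migration work easy to grep for. removeOrphanedPodStatuses here and rejectPod in kubelet.go use it because no suitable context reaches those call sites yet.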

pkg/kubelet/kubelet_pods_test.go

Lines changed: 5 additions & 2 deletions
@@ -47,6 +47,7 @@ import (
 	featuregatetesting "k8s.io/component-base/featuregate/testing"
 	"k8s.io/component-base/metrics/testutil"
 	runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
+	"k8s.io/klog/v2/ktesting"
 	"k8s.io/kubelet/pkg/cri/streaming/portforward"
 	"k8s.io/kubelet/pkg/cri/streaming/remotecommand"
 	_ "k8s.io/kubernetes/pkg/apis/core/install"
@@ -3866,11 +3867,12 @@ func Test_generateAPIPodStatus(t *testing.T) {
 	for _, test := range tests {
 		for _, enablePodReadyToStartContainersCondition := range []bool{false, true} {
 			t.Run(test.name, func(t *testing.T) {
+				logger, _ := ktesting.NewTestContext(t)
 				featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodReadyToStartContainersCondition, enablePodReadyToStartContainersCondition)
 				testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
 				defer testKubelet.Cleanup()
 				kl := testKubelet.kubelet
-				kl.statusManager.SetPodStatus(test.pod, test.previousStatus)
+				kl.statusManager.SetPodStatus(logger, test.pod, test.previousStatus)
 				for _, name := range test.unreadyContainer {
 					kl.readinessManager.Set(kubecontainer.BuildContainerID("", findContainerStatusByName(test.expected, name).ContainerID), results.Failure, test.pod)
 				}
@@ -3987,12 +3989,13 @@ func Test_generateAPIPodStatusForInPlaceVPAEnabled(t *testing.T) {
 	}
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
+			logger, _ := ktesting.NewTestContext(t)
 			testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
 			defer testKubelet.Cleanup()
 			kl := testKubelet.kubelet
 
 			oldStatus := test.pod.Status
-			kl.statusManager.SetPodStatus(test.pod, oldStatus)
+			kl.statusManager.SetPodStatus(logger, test.pod, oldStatus)
 			actual := kl.generateAPIPodStatus(test.pod, &testKubecontainerPodStatus /* criStatus */, false /* test.isPodTerminal */)
 			for _, c := range actual.Conditions {
 				if c.Type == v1.PodResizePending || c.Type == v1.PodResizeInProgress {

pkg/kubelet/kubelet_test.go

Lines changed: 5 additions & 3 deletions
@@ -1523,6 +1523,7 @@ func TestNetworkErrorsWithoutHostNetwork(t *testing.T) {
 }
 
 func TestFilterOutInactivePods(t *testing.T) {
+	logger, _ := ktesting.NewTestContext(t)
 	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
 	defer testKubelet.Cleanup()
 	kubelet := testKubelet.kubelet
@@ -1550,7 +1551,7 @@
 
 	// pod that is running but has been rejected by admission is excluded
 	pods[5].Status.Phase = v1.PodRunning
-	kubelet.statusManager.SetPodStatus(pods[5], v1.PodStatus{Phase: v1.PodFailed})
+	kubelet.statusManager.SetPodStatus(logger, pods[5], v1.PodStatus{Phase: v1.PodFailed})
 
 	// pod that is running according to the api but is known terminated is excluded
 	pods[6].Status.Phase = v1.PodRunning
@@ -1839,6 +1840,7 @@ func syncAndVerifyPodDir(t *testing.T, testKubelet *TestKubelet, pods []*v1.Pod,
 }
 
 func TestDoesNotDeletePodDirsForTerminatedPods(t *testing.T) {
+	logger, _ := ktesting.NewTestContext(t)
 	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
 	defer testKubelet.Cleanup()
 	kl := testKubelet.kubelet
@@ -1850,8 +1852,8 @@ func TestDoesNotDeletePodDirsForTerminatedPods(t *testing.T) {
 	syncAndVerifyPodDir(t, testKubelet, pods, pods, true)
 	// Pod 1 failed, and pod 2 succeeded. None of the pod directories should be
 	// deleted.
-	kl.statusManager.SetPodStatus(pods[1], v1.PodStatus{Phase: v1.PodFailed})
-	kl.statusManager.SetPodStatus(pods[2], v1.PodStatus{Phase: v1.PodSucceeded})
+	kl.statusManager.SetPodStatus(logger, pods[1], v1.PodStatus{Phase: v1.PodFailed})
+	kl.statusManager.SetPodStatus(logger, pods[2], v1.PodStatus{Phase: v1.PodSucceeded})
 	syncAndVerifyPodDir(t, testKubelet, pods, pods, true)
 }
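
The test changes all bootstrap their logger the same way, via klog's ktesting helper. A minimal standalone sketch (test name illustrative):

package example

import (
	"testing"

	"k8s.io/klog/v2/ktesting"
)

func TestWithLogger(t *testing.T) {
	// NewTestContext returns a logger that writes through t.Log, plus
	// a context carrying the same logger for contextual callees.
	logger, ctx := ktesting.NewTestContext(t)
	logger.Info("captured per test, shown on failure or with -v")
	_ = ctx // hand to code that calls klog.FromContext(ctx)
}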

pkg/kubelet/prober/prober_manager_test.go

Lines changed: 7 additions & 4 deletions
@@ -27,9 +27,11 @@ import (
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/klog/v2"
 	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
 	"k8s.io/kubernetes/pkg/kubelet/prober/results"
 	"k8s.io/kubernetes/pkg/probe"
+	"k8s.io/kubernetes/test/utils/ktesting"
 )
 
 func init() {
@@ -547,22 +549,23 @@ func TestUpdatePodStatusWithInitContainers(t *testing.T) {
 	}
 }
 
-func (m *manager) extractedReadinessHandling() {
+func (m *manager) extractedReadinessHandling(logger klog.Logger) {
 	update := <-m.readinessManager.Updates()
 	// This code corresponds to an extract from kubelet.syncLoopIteration()
 	ready := update.Result == results.Success
-	m.statusManager.SetContainerReadiness(update.PodUID, update.ContainerID, ready)
+	m.statusManager.SetContainerReadiness(logger, update.PodUID, update.ContainerID, ready)
 }
 
 func TestUpdateReadiness(t *testing.T) {
+	logger, _ := ktesting.NewTestContext(t)
 	testPod := getTestPod()
 	setTestProbe(testPod, readiness, v1.Probe{})
 	m := newTestManager()
 	defer cleanup(t, m)
 
 	// Start syncing readiness without leaking goroutine.
 	stopCh := make(chan struct{})
-	go wait.Until(m.extractedReadinessHandling, 0, stopCh)
+	go wait.Until(func() { m.extractedReadinessHandling(logger) }, 0, stopCh)
 	defer func() {
 		close(stopCh)
 		// Send an update to exit extractedReadinessHandling()
@@ -573,7 +576,7 @@
 	exec.set(probe.Success, nil)
 	m.prober.exec = &exec
 
-	m.statusManager.SetPodStatus(testPod, getTestRunningStatus())
+	m.statusManager.SetPodStatus(logger, testPod, getTestRunningStatus())
 
 	m.AddPod(testPod)
 	probePaths := []probeKey{{testPodUID, testContainerName, readiness}}
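
Design note: wait.Until only accepts a niladic func(), so extractedReadinessHandling cannot receive the logger from it directly; wrapping the call in a closure that captures logger is the standard way to keep a test-scoped logger flowing into such periodic runners.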

pkg/kubelet/prober/scale_test.go

Lines changed: 3 additions & 1 deletion
@@ -37,6 +37,7 @@ import (
 	"k8s.io/kubernetes/pkg/kubelet/status"
 	statustest "k8s.io/kubernetes/pkg/kubelet/status/testing"
 	kubeletutil "k8s.io/kubernetes/pkg/kubelet/util"
+	"k8s.io/kubernetes/test/utils/ktesting"
 	"k8s.io/utils/ptr"
 )
 
@@ -80,6 +81,7 @@ func TestTCPPortExhaustion(t *testing.T) {
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
+			logger, _ := ktesting.NewTestContext(t)
 			podManager := kubepod.NewBasicPodManager()
 			podStartupLatencyTracker := kubeletutil.NewPodStartupLatencyTracker()
 			m := NewManager(
@@ -133,7 +135,7 @@
 				})
 			}
 			podManager.AddPod(&pod)
-			m.statusManager.SetPodStatus(&pod, pod.Status)
+			m.statusManager.SetPodStatus(logger, &pod, pod.Status)
 			m.AddPod(&pod)
 		}
 		t.Logf("Adding %d pods with %d containers each in %v", numTestPods, numContainers, time.Since(now))
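
Two different ktesting packages appear in this PR: kubelet_pods_test.go imports k8s.io/klog/v2/ktesting directly, while the prober tests import k8s.io/kubernetes/test/utils/ktesting, a kubernetes-local package that builds on the klog helper. Both expose the NewTestContext(t) call used in the diffs above.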
