
Commit d868eeb

Migrate pkg/kubelet/status to contextual logging
Update with logger from context. Fix nil pointer issue in TestUpdateReadiness.
1 parent aa35eff commit d868eeb

12 files changed (+307, -247 lines)
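Background for the diffs below: contextual logging replaces calls to the global klog API with a logr.Logger that is passed explicitly or carried in a context.Context, so values attached once (pod, trace context) appear on every log line. As the hunks show, status.Manager methods such as SetPodStatus now take the logger as their first argument. A minimal sketch of the pattern — the statusManager type here is a simplified stand-in, not the real pkg/kubelet/status interface:

package example

import (
	"context"

	"k8s.io/klog/v2"
)

// statusManager is a simplified stand-in for pkg/kubelet/status.Manager.
type statusManager struct{}

// SetPodStatus receives the logger explicitly instead of using the
// global klog functions, so test output and trace context follow the call.
func (s *statusManager) SetPodStatus(logger klog.Logger, podName string) {
	logger.V(4).Info("Updating pod status", "pod", podName)
}

func syncPod(ctx context.Context, s *statusManager, podName string) {
	// Derive the logger once from the (possibly traced) context ...
	logger := klog.FromContext(ctx)
	// ... and hand it to everything in the function body that logs.
	s.SetPodStatus(logger, podName)
}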

hack/golangci-hints.yaml

Lines changed: 1 addition & 0 deletions
@@ -177,6 +177,7 @@ linters-settings: # please keep this alphabetized
 contextual k8s.io/kubernetes/pkg/kubelet/token/.*
 contextual k8s.io/kubernetes/pkg/kubelet/cadvisor/.*
 contextual k8s.io/kubernetes/pkg/kubelet/oom/.*
+contextual k8s.io/kubernetes/pkg/kubelet/status/.*
 contextual k8s.io/kubernetes/pkg/kubelet/sysctl/.*

 # As long as contextual logging is alpha or beta, all WithName, WithValues,

hack/golangci.yaml

Lines changed: 1 addition & 0 deletions
@@ -243,6 +243,7 @@ linters-settings: # please keep this alphabetized
 contextual k8s.io/kubernetes/pkg/kubelet/token/.*
 contextual k8s.io/kubernetes/pkg/kubelet/cadvisor/.*
 contextual k8s.io/kubernetes/pkg/kubelet/oom/.*
+contextual k8s.io/kubernetes/pkg/kubelet/status/.*
 contextual k8s.io/kubernetes/pkg/kubelet/sysctl/.*

 # As long as contextual logging is alpha or beta, all WithName, WithValues,

hack/logcheck.conf

Lines changed: 1 addition & 0 deletions
@@ -55,6 +55,7 @@ contextual k8s.io/kubernetes/pkg/kubelet/clustertrustbundle/.*
 contextual k8s.io/kubernetes/pkg/kubelet/token/.*
 contextual k8s.io/kubernetes/pkg/kubelet/cadvisor/.*
 contextual k8s.io/kubernetes/pkg/kubelet/oom/.*
+contextual k8s.io/kubernetes/pkg/kubelet/status/.*
 contextual k8s.io/kubernetes/pkg/kubelet/sysctl/.*

 # As long as contextual logging is alpha or beta, all WithName, WithValues,
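All three files carry the same one-line change: they opt pkg/kubelet/status into logcheck's contextual mode, under which direct calls to the global klog API are reported as errors. Roughly, with hypothetical function names:

package example

import (
	"context"

	v1 "k8s.io/api/core/v1"
	"k8s.io/klog/v2"
)

// In a package listed as "contextual", logcheck flags the global call:
func flagged(pod *v1.Pod) {
	klog.InfoS("Pod synced", "pod", klog.KObj(pod)) // logcheck error
}

// ...and accepts the logger-passing equivalent:
func accepted(ctx context.Context, pod *v1.Pod) {
	logger := klog.FromContext(ctx)
	logger.Info("Pod synced", "pod", klog.KObj(pod))
}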

pkg/kubelet/kubelet.go

Lines changed: 14 additions & 10 deletions
@@ -1809,7 +1809,7 @@ func (kl *Kubelet) Run(updates <-chan kubetypes.PodUpdate) {
 	}

 	// Start component sync loops.
-	kl.statusManager.Start()
+	kl.statusManager.Start(ctx)

 	// Start syncing RuntimeClasses if enabled.
 	if kl.runtimeClassManager != nil {
@@ -1888,6 +1888,7 @@ func (kl *Kubelet) SyncPod(ctx context.Context, updateType kubetypes.SyncPodType
 		attribute.String("k8s.pod.update_type", updateType.String()),
 		semconv.K8SNamespaceNameKey.String(pod.Namespace),
 	))
+	logger := klog.FromContext(ctx)
 	klog.V(4).InfoS("SyncPod enter", "pod", klog.KObj(pod), "podUID", pod.UID)
 	defer func() {
 		if err != nil {
@@ -1948,7 +1949,7 @@

 	// If the pod is terminal, we don't need to continue to setup the pod
 	if apiPodStatus.Phase == v1.PodSucceeded || apiPodStatus.Phase == v1.PodFailed {
-		kl.statusManager.SetPodStatus(pod, apiPodStatus)
+		kl.statusManager.SetPodStatus(logger, pod, apiPodStatus)
 		isTerminal = true
 		return isTerminal, nil
 	}
@@ -1961,7 +1962,7 @@
 		metrics.PodStartDuration.Observe(metrics.SinceInSeconds(firstSeenTime))
 	}

-	kl.statusManager.SetPodStatus(pod, apiPodStatus)
+	kl.statusManager.SetPodStatus(logger, pod, apiPodStatus)

 	// If the network plugin is not ready, only start the pod if it uses the host network
 	if err := kl.runtimeState.networkErrors(); err != nil && !kubecontainer.IsHostNetworkPod(pod) {
@@ -2099,6 +2100,7 @@ func (kl *Kubelet) SyncTerminatingPod(_ context.Context, pod *v1.Pod, podStatus
 		semconv.K8SPodNameKey.String(pod.Name),
 		semconv.K8SNamespaceNameKey.String(pod.Namespace),
 	))
+	logger := klog.FromContext(ctx)
 	defer otelSpan.End()
 	klog.V(4).InfoS("SyncTerminatingPod enter", "pod", klog.KObj(pod), "podUID", pod.UID)
 	defer klog.V(4).InfoS("SyncTerminatingPod exit", "pod", klog.KObj(pod), "podUID", pod.UID)
@@ -2112,7 +2114,7 @@
 	if podStatusFn != nil {
 		podStatusFn(&apiPodStatus)
 	}
-	kl.statusManager.SetPodStatus(pod, apiPodStatus)
+	kl.statusManager.SetPodStatus(logger, pod, apiPodStatus)

 	if gracePeriod != nil {
 		klog.V(4).InfoS("Pod terminating with grace period", "pod", klog.KObj(pod), "podUID", pod.UID, "gracePeriod", *gracePeriod)
@@ -2187,7 +2189,7 @@
 	// information about the container end states (including exit codes) - when
 	// SyncTerminatedPod is called the containers may already be removed.
 	apiPodStatus = kl.generateAPIPodStatus(pod, stoppedPodStatus, true)
-	kl.statusManager.SetPodStatus(pod, apiPodStatus)
+	kl.statusManager.SetPodStatus(logger, pod, apiPodStatus)

 	// we have successfully stopped all containers, the pod is terminating, our status is "done"
 	klog.V(4).InfoS("Pod termination stopped all running containers", "pod", klog.KObj(pod), "podUID", pod.UID)
@@ -2249,6 +2251,7 @@ func (kl *Kubelet) SyncTerminatedPod(ctx context.Context, pod *v1.Pod, podStatus
 		semconv.K8SPodNameKey.String(pod.Name),
 		semconv.K8SNamespaceNameKey.String(pod.Namespace),
 	))
+	logger := klog.FromContext(ctx)
 	defer otelSpan.End()
 	klog.V(4).InfoS("SyncTerminatedPod enter", "pod", klog.KObj(pod), "podUID", pod.UID)
 	defer klog.V(4).InfoS("SyncTerminatedPod exit", "pod", klog.KObj(pod), "podUID", pod.UID)
@@ -2262,7 +2265,7 @@
 	// TODO: should we simply fold this into TerminatePod? that would give a single pod update
 	apiPodStatus := kl.generateAPIPodStatus(pod, podStatus, true)

-	kl.statusManager.SetPodStatus(pod, apiPodStatus)
+	kl.statusManager.SetPodStatus(logger, pod, apiPodStatus)

 	// volumes are unmounted after the pod worker reports ShouldPodRuntimeBeRemoved (which is satisfied
 	// before syncTerminatedPod is invoked)
@@ -2309,7 +2312,7 @@
 	kl.usernsManager.Release(pod.UID)

 	// mark the final pod status
-	kl.statusManager.TerminatePod(pod)
+	kl.statusManager.TerminatePod(logger, pod)
 	klog.V(4).InfoS("Pod is terminated and will need no more status updates", "pod", klog.KObj(pod), "podUID", pod.UID)

 	return nil
@@ -2379,7 +2382,7 @@ func (kl *Kubelet) deletePod(pod *v1.Pod) error {
 // and updates the pod to the failed phase in the status manager.
 func (kl *Kubelet) rejectPod(pod *v1.Pod, reason, message string) {
 	kl.recorder.Eventf(pod, v1.EventTypeWarning, reason, message)
-	kl.statusManager.SetPodStatus(pod, v1.PodStatus{
+	kl.statusManager.SetPodStatus(klog.TODO(), pod, v1.PodStatus{
 		QOSClass: v1qos.GetPodQOS(pod), // keep it as is
 		Phase:    v1.PodFailed,
 		Reason:   reason,
@@ -2507,6 +2510,7 @@ func (kl *Kubelet) syncLoop(ctx context.Context, updates <-chan kubetypes.PodUpd
 // containers have failed health checks
 func (kl *Kubelet) syncLoopIteration(ctx context.Context, configCh <-chan kubetypes.PodUpdate, handler SyncHandler,
 	syncCh <-chan time.Time, housekeepingCh <-chan time.Time, plegCh <-chan *pleg.PodLifecycleEvent) bool {
+	logger := klog.FromContext(ctx)
 	select {
 	case u, open := <-configCh:
 		// Update from a config source; dispatch it to the right handler
@@ -2577,7 +2581,7 @@
 		}
 	case update := <-kl.readinessManager.Updates():
 		ready := update.Result == proberesults.Success
-		kl.statusManager.SetContainerReadiness(update.PodUID, update.ContainerID, ready)
+		kl.statusManager.SetContainerReadiness(logger, update.PodUID, update.ContainerID, ready)

 		status := "not ready"
 		if ready {
@@ -2586,7 +2590,7 @@
 		handleProbeSync(kl, update, handler, "readiness", status)
 	case update := <-kl.startupManager.Updates():
 		started := update.Result == proberesults.Success
-		kl.statusManager.SetContainerStartup(update.PodUID, update.ContainerID, started)
+		kl.statusManager.SetContainerStartup(logger, update.PodUID, update.ContainerID, started)

 		status := "unhealthy"
 		if started {
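Two call sites receive klog.TODO() instead of a context-derived logger: rejectPod above, and RemoveOrphanedStatuses in the next file. Their callers have no context to hand over yet; per klog's documentation, TODO() is the stopgap for exactly that case — it logs through the global default logger while marking the call site for later plumbing. A sketch with hypothetical names:

package example

import (
	"k8s.io/klog/v2"
)

// rejectSomething has no context.Context parameter yet, so there is
// nothing to derive a logger from. klog.TODO() keeps the code compiling
// and logging while flagging the spot for a follow-up migration pass.
func rejectSomething(reason string) {
	doUpdate(klog.TODO(), reason)
}

func doUpdate(logger klog.Logger, reason string) {
	logger.Info("Rejected", "reason", reason)
}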

pkg/kubelet/kubelet_pods.go

Lines changed: 1 addition & 1 deletion
@@ -1113,7 +1113,7 @@ func (kl *Kubelet) removeOrphanedPodStatuses(pods []*v1.Pod, mirrorPods []*v1.Po
 	for _, pod := range mirrorPods {
 		podUIDs[pod.UID] = true
 	}
-	kl.statusManager.RemoveOrphanedStatuses(podUIDs)
+	kl.statusManager.RemoveOrphanedStatuses(klog.TODO(), podUIDs)
 }

 // HandlePodCleanups performs a series of cleanup work, including terminating

pkg/kubelet/kubelet_pods_test.go

Lines changed: 5 additions & 2 deletions
@@ -47,6 +47,7 @@ import (
 	featuregatetesting "k8s.io/component-base/featuregate/testing"
 	"k8s.io/component-base/metrics/testutil"
 	runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
+	"k8s.io/klog/v2/ktesting"
 	"k8s.io/kubelet/pkg/cri/streaming/portforward"
 	"k8s.io/kubelet/pkg/cri/streaming/remotecommand"
 	_ "k8s.io/kubernetes/pkg/apis/core/install"
@@ -3800,11 +3801,12 @@ func Test_generateAPIPodStatus(t *testing.T) {
 	for _, test := range tests {
 		for _, enablePodReadyToStartContainersCondition := range []bool{false, true} {
 			t.Run(test.name, func(t *testing.T) {
+				logger, _ := ktesting.NewTestContext(t)
 				featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodReadyToStartContainersCondition, enablePodReadyToStartContainersCondition)
 				testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
 				defer testKubelet.Cleanup()
 				kl := testKubelet.kubelet
-				kl.statusManager.SetPodStatus(test.pod, test.previousStatus)
+				kl.statusManager.SetPodStatus(logger, test.pod, test.previousStatus)
 				for _, name := range test.unreadyContainer {
 					kl.readinessManager.Set(kubecontainer.BuildContainerID("", findContainerStatusByName(test.expected, name).ContainerID), results.Failure, test.pod)
 				}
@@ -3921,12 +3923,13 @@ func Test_generateAPIPodStatusForInPlaceVPAEnabled(t *testing.T) {
 	}
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
+			logger, _ := ktesting.NewTestContext(t)
 			testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
 			defer testKubelet.Cleanup()
 			kl := testKubelet.kubelet

 			oldStatus := test.pod.Status
-			kl.statusManager.SetPodStatus(test.pod, oldStatus)
+			kl.statusManager.SetPodStatus(logger, test.pod, oldStatus)
 			actual := kl.generateAPIPodStatus(test.pod, &testKubecontainerPodStatus /* criStatus */, false /* test.isPodTerminal */)
 			for _, c := range actual.Conditions {
 				if c.Type == v1.PodResizePending || c.Type == v1.PodResizeInProgress {
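The test-side changes all follow one recipe: ktesting.NewTestContext(t) builds a logger wired to t.Log (plus a context carrying the same logger), so output from the code under test lands in the per-test log. A minimal sketch using the klog/v2/ktesting flavor imported above; the prober tests further down pull NewTestContext from k8s.io/kubernetes/test/utils/ktesting instead, which provides a compatible helper:

package example

import (
	"testing"

	"k8s.io/klog/v2"
	"k8s.io/klog/v2/ktesting"
)

func logSomething(logger klog.Logger) {
	logger.Info("hello from the code under test")
}

func TestWithTestLogger(t *testing.T) {
	// logger routes through t.Log; ctx carries the same logger for
	// code that calls klog.FromContext(ctx).
	logger, ctx := ktesting.NewTestContext(t)
	_ = ctx

	logSomething(logger)
}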

pkg/kubelet/kubelet_test.go

Lines changed: 5 additions & 3 deletions
@@ -1523,6 +1523,7 @@ func TestNetworkErrorsWithoutHostNetwork(t *testing.T) {
 }

 func TestFilterOutInactivePods(t *testing.T) {
+	logger, _ := ktesting.NewTestContext(t)
 	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
 	defer testKubelet.Cleanup()
 	kubelet := testKubelet.kubelet
@@ -1550,7 +1551,7 @@

 	// pod that is running but has been rejected by admission is excluded
 	pods[5].Status.Phase = v1.PodRunning
-	kubelet.statusManager.SetPodStatus(pods[5], v1.PodStatus{Phase: v1.PodFailed})
+	kubelet.statusManager.SetPodStatus(logger, pods[5], v1.PodStatus{Phase: v1.PodFailed})

 	// pod that is running according to the api but is known terminated is excluded
 	pods[6].Status.Phase = v1.PodRunning
@@ -1839,6 +1840,7 @@ func syncAndVerifyPodDir(t *testing.T, testKubelet *TestKubelet, pods []*v1.Pod,
 }

 func TestDoesNotDeletePodDirsForTerminatedPods(t *testing.T) {
+	logger, _ := ktesting.NewTestContext(t)
 	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
 	defer testKubelet.Cleanup()
 	kl := testKubelet.kubelet
@@ -1850,8 +1852,8 @@
 	syncAndVerifyPodDir(t, testKubelet, pods, pods, true)
 	// Pod 1 failed, and pod 2 succeeded. None of the pod directories should be
 	// deleted.
-	kl.statusManager.SetPodStatus(pods[1], v1.PodStatus{Phase: v1.PodFailed})
-	kl.statusManager.SetPodStatus(pods[2], v1.PodStatus{Phase: v1.PodSucceeded})
+	kl.statusManager.SetPodStatus(logger, pods[1], v1.PodStatus{Phase: v1.PodFailed})
+	kl.statusManager.SetPodStatus(logger, pods[2], v1.PodStatus{Phase: v1.PodSucceeded})
 	syncAndVerifyPodDir(t, testKubelet, pods, pods, true)
 }

pkg/kubelet/prober/prober_manager_test.go

Lines changed: 7 additions & 4 deletions
@@ -27,9 +27,11 @@ import (
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/klog/v2"
 	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
 	"k8s.io/kubernetes/pkg/kubelet/prober/results"
 	"k8s.io/kubernetes/pkg/probe"
+	"k8s.io/kubernetes/test/utils/ktesting"
 )

 func init() {
@@ -547,22 +549,23 @@ func TestUpdatePodStatusWithInitContainers(t *testing.T) {
 	}
 }

-func (m *manager) extractedReadinessHandling() {
+func (m *manager) extractedReadinessHandling(logger klog.Logger) {
 	update := <-m.readinessManager.Updates()
 	// This code corresponds to an extract from kubelet.syncLoopIteration()
 	ready := update.Result == results.Success
-	m.statusManager.SetContainerReadiness(update.PodUID, update.ContainerID, ready)
+	m.statusManager.SetContainerReadiness(logger, update.PodUID, update.ContainerID, ready)
 }

 func TestUpdateReadiness(t *testing.T) {
+	logger, _ := ktesting.NewTestContext(t)
 	testPod := getTestPod()
 	setTestProbe(testPod, readiness, v1.Probe{})
 	m := newTestManager()
 	defer cleanup(t, m)

 	// Start syncing readiness without leaking goroutine.
 	stopCh := make(chan struct{})
-	go wait.Until(m.extractedReadinessHandling, 0, stopCh)
+	go wait.Until(func() { m.extractedReadinessHandling(logger) }, 0, stopCh)
 	defer func() {
 		close(stopCh)
 		// Send an update to exit extractedReadinessHandling()
@@ -573,7 +576,7 @@
 	exec.set(probe.Success, nil)
 	m.prober.exec = &exec

-	m.statusManager.SetPodStatus(testPod, getTestRunningStatus())
+	m.statusManager.SetPodStatus(logger, testPod, getTestRunningStatus())

 	m.AddPod(testPod)
 	probePaths := []probeKey{{testPodUID, testContainerName, readiness}}
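Note the shape of the wait.Until change: wait.Until accepts only a plain func(), so once extractedReadinessHandling gained a logger parameter it has to be wrapped in a closure that captures the test logger. Presumably this is also where the nil-pointer issue from the commit message surfaced, since the background goroutine now always runs with a valid logger. The adapter pattern in isolation, with hypothetical names:

package example

import (
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/klog/v2"
)

func handleOne(logger klog.Logger) {
	logger.Info("handled one update")
}

func startLoop(logger klog.Logger, stopCh <-chan struct{}) {
	// wait.Until only accepts func(), so capture the logger in a closure.
	go wait.Until(func() { handleOne(logger) }, 0, stopCh)
}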

pkg/kubelet/prober/scale_test.go

Lines changed: 3 additions & 1 deletion
@@ -37,6 +37,7 @@ import (
 	"k8s.io/kubernetes/pkg/kubelet/status"
 	statustest "k8s.io/kubernetes/pkg/kubelet/status/testing"
 	kubeletutil "k8s.io/kubernetes/pkg/kubelet/util"
+	"k8s.io/kubernetes/test/utils/ktesting"
 	"k8s.io/utils/ptr"
 )

@@ -80,6 +81,7 @@ func TestTCPPortExhaustion(t *testing.T) {
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
+			logger, _ := ktesting.NewTestContext(t)
 			podManager := kubepod.NewBasicPodManager()
 			podStartupLatencyTracker := kubeletutil.NewPodStartupLatencyTracker()
 			m := NewManager(
@@ -133,7 +135,7 @@
 				})
 			}
 			podManager.AddPod(&pod)
-			m.statusManager.SetPodStatus(&pod, pod.Status)
+			m.statusManager.SetPodStatus(logger, &pod, pod.Status)
 			m.AddPod(&pod)
 		}
 		t.Logf("Adding %d pods with %d containers each in %v", numTestPods, numContainers, time.Since(now))
