Commit 5ac315f

Merge pull request kubernetes#126494 from bart0sh/PR153-migrate-DRA-Manager-to-contextual-logging
Migrate pkg/kubelet/cm/dra to contextual logging
2 parents 0bcbc3b + e1bc8de commit 5ac315f

20 files changed: +91 −80 lines changed

hack/golangci-hints.yaml

Lines changed: 1 addition & 0 deletions
@@ -151,6 +151,7 @@ linters-settings: # please keep this alphabetized
 contextual k8s.io/kubernetes/pkg/controller/.*
 contextual k8s.io/kubernetes/pkg/scheduler/.*
 contextual k8s.io/kubernetes/test/e2e/dra/.*
+contextual k8s.io/kubernetes/pkg/kubelet/cm/dra/.*
 
 # As long as contextual logging is alpha or beta, all WithName, WithValues,
 # NewContext calls have to go through klog. Once it is GA, we can lift

hack/golangci-strict.yaml

Lines changed: 1 addition & 0 deletions
@@ -197,6 +197,7 @@ linters-settings: # please keep this alphabetized
 contextual k8s.io/kubernetes/pkg/controller/.*
 contextual k8s.io/kubernetes/pkg/scheduler/.*
 contextual k8s.io/kubernetes/test/e2e/dra/.*
+contextual k8s.io/kubernetes/pkg/kubelet/cm/dra/.*
 
 # As long as contextual logging is alpha or beta, all WithName, WithValues,
 # NewContext calls have to go through klog. Once it is GA, we can lift

hack/golangci.yaml

Lines changed: 1 addition & 0 deletions
@@ -200,6 +200,7 @@ linters-settings: # please keep this alphabetized
 contextual k8s.io/kubernetes/pkg/controller/.*
 contextual k8s.io/kubernetes/pkg/scheduler/.*
 contextual k8s.io/kubernetes/test/e2e/dra/.*
+contextual k8s.io/kubernetes/pkg/kubelet/cm/dra/.*
 
 # As long as contextual logging is alpha or beta, all WithName, WithValues,
 # NewContext calls have to go through klog. Once it is GA, we can lift

hack/logcheck.conf

Lines changed: 1 addition & 0 deletions
@@ -47,6 +47,7 @@ contextual k8s.io/kubernetes/cmd/kube-scheduler/.*
 contextual k8s.io/kubernetes/pkg/controller/.*
 contextual k8s.io/kubernetes/pkg/scheduler/.*
 contextual k8s.io/kubernetes/test/e2e/dra/.*
+contextual k8s.io/kubernetes/pkg/kubelet/cm/dra/.*
 
 # As long as contextual logging is alpha or beta, all WithName, WithValues,
 # NewContext calls have to go through klog. Once it is GA, we can lift
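Adding pkg/kubelet/cm/dra to these lists makes logcheck enforce contextual logging for the DRA manager: log calls must go through a logger derived from the caller's context rather than the global klog helpers. A minimal sketch of the enforced pattern (the prepareClaims helper is hypothetical, not part of this commit):

```go
package dra

import (
	"context"

	"k8s.io/klog/v2"
)

// prepareClaims illustrates the contextual-logging style logcheck expects:
// derive the logger from the context once, then log structured key/value
// pairs through it instead of calling klog.InfoS directly.
func prepareClaims(ctx context.Context, claimNames []string) {
	logger := klog.FromContext(ctx)
	for _, name := range claimNames {
		logger.V(5).Info("Preparing resource claim", "claim", name)
	}
}
```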

pkg/kubelet/cm/container_manager.go

Lines changed: 5 additions & 4 deletions
@@ -17,6 +17,7 @@ limitations under the License.
 package cm
 
 import (
+	"context"
 	"fmt"
 	"strconv"
 	"strings"
@@ -55,7 +56,7 @@ type ContainerManager interface {
 	// Runs the container manager's housekeeping.
 	// - Ensures that the Docker daemon is in a container.
 	// - Creates the system container where all non-containerized processes run.
-	Start(*v1.Node, ActivePodsFunc, config.SourcesReady, status.PodStatusProvider, internalapi.RuntimeService, bool) error
+	Start(context.Context, *v1.Node, ActivePodsFunc, config.SourcesReady, status.PodStatusProvider, internalapi.RuntimeService, bool) error
 
 	// SystemCgroupsLimit returns resources allocated to system cgroups in the machine.
 	// These cgroups include the system and Kubernetes services.
@@ -94,7 +95,7 @@ type ContainerManager interface {
 
 	// GetResources returns RunContainerOptions with devices, mounts, and env fields populated for
 	// extended resources required by container.
-	GetResources(pod *v1.Pod, container *v1.Container) (*kubecontainer.RunContainerOptions, error)
+	GetResources(ctx context.Context, pod *v1.Pod, container *v1.Container) (*kubecontainer.RunContainerOptions, error)
 
 	// UpdatePluginResources calls Allocate of device plugin handler for potential
 	// requests for device plugin resources, and returns an error if fails.
@@ -124,10 +125,10 @@ type ContainerManager interface {
 	GetNodeAllocatableAbsolute() v1.ResourceList
 
 	// PrepareDynamicResource prepares dynamic pod resources
-	PrepareDynamicResources(*v1.Pod) error
+	PrepareDynamicResources(context.Context, *v1.Pod) error
 
 	// UnprepareDynamicResources unprepares dynamic pod resources
-	UnprepareDynamicResources(*v1.Pod) error
+	UnprepareDynamicResources(context.Context, *v1.Pod) error
 
 	// PodMightNeedToUnprepareResources returns true if the pod with the given UID
 	// might need to unprepare resources.
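Because every ContainerManager method that logs or calls out now takes a context.Context, callers decide which logger (and cancellation) the manager sees. A sketch of the calling side under that assumption (the names here are illustrative, not from this diff):

```go
package cm

import (
	"context"

	"k8s.io/klog/v2"
)

// startWithNamedLogger shows how a caller can attach a named logger to the
// context before invoking the new Start(ctx, ...) signature; klog.FromContext
// inside the manager then picks that logger up automatically.
func startWithNamedLogger(ctx context.Context, start func(context.Context) error) error {
	logger := klog.LoggerWithName(klog.FromContext(ctx), "ContainerManager")
	ctx = klog.NewContext(ctx, logger)
	return start(ctx)
}
```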

pkg/kubelet/cm/container_manager_linux.go

Lines changed: 9 additions & 8 deletions
@@ -553,19 +553,18 @@ func (cm *containerManagerImpl) Status() Status {
 	return cm.status
 }
 
-func (cm *containerManagerImpl) Start(node *v1.Node,
+func (cm *containerManagerImpl) Start(ctx context.Context, node *v1.Node,
 	activePods ActivePodsFunc,
 	sourcesReady config.SourcesReady,
 	podStatusProvider status.PodStatusProvider,
 	runtimeService internalapi.RuntimeService,
 	localStorageCapacityIsolation bool) error {
-	ctx := context.Background()
 
 	containerMap, containerRunningSet := buildContainerMapAndRunningSetFromRuntime(ctx, runtimeService)
 
 	// Initialize DRA manager
 	if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.DynamicResourceAllocation) {
-		err := cm.draManager.Start(dra.ActivePodsFunc(activePods), sourcesReady)
+		err := cm.draManager.Start(ctx, dra.ActivePodsFunc(activePods), sourcesReady)
 		if err != nil {
 			return fmt.Errorf("start dra manager error: %w", err)
 		}
@@ -655,13 +654,15 @@ func (cm *containerManagerImpl) GetPluginRegistrationHandler() cache.PluginHandl
 }
 
 // TODO: move the GetResources logic to PodContainerManager.
-func (cm *containerManagerImpl) GetResources(pod *v1.Pod, container *v1.Container) (*kubecontainer.RunContainerOptions, error) {
+func (cm *containerManagerImpl) GetResources(ctx context.Context, pod *v1.Pod, container *v1.Container) (*kubecontainer.RunContainerOptions, error) {
+	logger := klog.FromContext(ctx)
 	opts := &kubecontainer.RunContainerOptions{}
 	if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.DynamicResourceAllocation) {
 		resOpts, err := cm.draManager.GetResources(pod, container)
 		if err != nil {
 			return nil, err
 		}
+		logger.V(5).Info("Determined CDI devices for pod", "pod", klog.KObj(pod), "cdiDevices", resOpts.CDIDevices)
 		opts.CDIDevices = append(opts.CDIDevices, resOpts.CDIDevices...)
 	}
 	// Allocate should already be called during predicateAdmitHandler.Admit(),
@@ -1017,12 +1018,12 @@ func containerMemoryFromBlock(blocks []memorymanagerstate.Block) []*podresources
 	return containerMemories
 }
 
-func (cm *containerManagerImpl) PrepareDynamicResources(pod *v1.Pod) error {
-	return cm.draManager.PrepareResources(pod)
+func (cm *containerManagerImpl) PrepareDynamicResources(ctx context.Context, pod *v1.Pod) error {
+	return cm.draManager.PrepareResources(ctx, pod)
 }
 
-func (cm *containerManagerImpl) UnprepareDynamicResources(pod *v1.Pod) error {
-	return cm.draManager.UnprepareResources(pod)
+func (cm *containerManagerImpl) UnprepareDynamicResources(ctx context.Context, pod *v1.Pod) error {
+	return cm.draManager.UnprepareResources(ctx, pod)
 }
 
 func (cm *containerManagerImpl) PodMightNeedToUnprepareResources(UID types.UID) bool {
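A side effect of threading the context through GetResources and the other methods is that unit tests can inject a per-test logger. A sketch using klog's ktesting package (the callGetResources helper is a hypothetical stand-in, not part of this commit):

```go
package cm

import (
	"context"
	"testing"

	"k8s.io/klog/v2/ktesting"
)

// TestGetResourcesLogsToTestLogger shows the contextual-logging benefit for
// tests: ktesting routes output produced via klog.FromContext(ctx) to the
// test's own log, so a failure shows only that test's messages.
func TestGetResourcesLogsToTestLogger(t *testing.T) {
	_, ctx := ktesting.NewTestContext(t)
	if err := callGetResources(ctx); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
}

// callGetResources is a hypothetical stand-in for invoking
// cm.GetResources(ctx, pod, container) on a constructed container manager.
func callGetResources(ctx context.Context) error {
	_ = ctx
	return nil
}
```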

pkg/kubelet/cm/container_manager_stub.go

Lines changed: 5 additions & 4 deletions
@@ -17,6 +17,7 @@ limitations under the License.
 package cm
 
 import (
+	"context"
 	"fmt"
 
 	v1 "k8s.io/api/core/v1"
@@ -45,7 +46,7 @@ type containerManagerStub struct {
 
 var _ ContainerManager = &containerManagerStub{}
 
-func (cm *containerManagerStub) Start(_ *v1.Node, _ ActivePodsFunc, _ config.SourcesReady, _ status.PodStatusProvider, _ internalapi.RuntimeService, _ bool) error {
+func (cm *containerManagerStub) Start(_ context.Context, _ *v1.Node, _ ActivePodsFunc, _ config.SourcesReady, _ status.PodStatusProvider, _ internalapi.RuntimeService, _ bool) error {
 	klog.V(2).InfoS("Starting stub container manager")
 	return nil
 }
@@ -110,7 +111,7 @@ func (cm *containerManagerStub) NewPodContainerManager() PodContainerManager {
 	return &podContainerManagerStub{}
 }
 
-func (cm *containerManagerStub) GetResources(pod *v1.Pod, container *v1.Container) (*kubecontainer.RunContainerOptions, error) {
+func (cm *containerManagerStub) GetResources(ctx context.Context, pod *v1.Pod, container *v1.Container) (*kubecontainer.RunContainerOptions, error) {
 	return &kubecontainer.RunContainerOptions{}, nil
 }
 
@@ -170,11 +171,11 @@ func (cm *containerManagerStub) GetNodeAllocatableAbsolute() v1.ResourceList {
 	return nil
 }
 
-func (cm *containerManagerStub) PrepareDynamicResources(pod *v1.Pod) error {
+func (cm *containerManagerStub) PrepareDynamicResources(ctx context.Context, pod *v1.Pod) error {
 	return nil
 }
 
-func (cm *containerManagerStub) UnprepareDynamicResources(*v1.Pod) error {
+func (cm *containerManagerStub) UnprepareDynamicResources(ctx context.Context, pod *v1.Pod) error {
 	return nil
 }

pkg/kubelet/cm/container_manager_unsupported.go

Lines changed: 2 additions & 1 deletion
@@ -20,6 +20,7 @@ limitations under the License.
 package cm
 
 import (
+	"context"
 	"fmt"
 
 	"k8s.io/mount-utils"
@@ -39,7 +40,7 @@ type unsupportedContainerManager struct {
 
 var _ ContainerManager = &unsupportedContainerManager{}
 
-func (unsupportedContainerManager) Start(_ *v1.Node, _ ActivePodsFunc, _ config.SourcesReady, _ status.PodStatusProvider, _ internalapi.RuntimeService, _ bool) error {
+func (unsupportedContainerManager) Start(_ context.Context, _ *v1.Node, _ ActivePodsFunc, _ config.SourcesReady, _ status.PodStatusProvider, _ internalapi.RuntimeService, _ bool) error {
 	return fmt.Errorf("Container Manager is unsupported in this build")
 }

pkg/kubelet/cm/container_manager_windows.go

Lines changed: 4 additions & 5 deletions
@@ -70,7 +70,7 @@ func (ra *noopWindowsResourceAllocator) Admit(attrs *lifecycle.PodAdmitAttribute
 	return admission.GetPodAdmitResult(nil)
 }
 
-func (cm *containerManagerImpl) Start(node *v1.Node,
+func (cm *containerManagerImpl) Start(ctx context.Context, node *v1.Node,
 	activePods ActivePodsFunc,
 	sourcesReady config.SourcesReady,
 	podStatusProvider status.PodStatusProvider,
@@ -88,7 +88,6 @@ func (cm *containerManagerImpl) Start(node *v1.Node,
 		}
 	}
 
-	ctx := context.Background()
 	containerMap, containerRunningSet := buildContainerMapAndRunningSetFromRuntime(ctx, runtimeService)
 
 	// Starts device manager.
@@ -189,7 +188,7 @@ func (cm *containerManagerImpl) NewPodContainerManager() PodContainerManager {
 	return &podContainerManagerStub{}
 }
 
-func (cm *containerManagerImpl) GetResources(pod *v1.Pod, container *v1.Container) (*kubecontainer.RunContainerOptions, error) {
+func (cm *containerManagerImpl) GetResources(ctx context.Context, pod *v1.Pod, container *v1.Container) (*kubecontainer.RunContainerOptions, error) {
 	opts := &kubecontainer.RunContainerOptions{}
 	// Allocate should already be called during predicateAdmitHandler.Admit(),
 	// just try to fetch device runtime information from cached state here
@@ -275,11 +274,11 @@ func (cm *containerManagerImpl) GetDynamicResources(pod *v1.Pod, container *v1.C
 	return nil
 }
 
-func (cm *containerManagerImpl) PrepareDynamicResources(pod *v1.Pod) error {
+func (cm *containerManagerImpl) PrepareDynamicResources(ctx context.Context, pod *v1.Pod) error {
 	return nil
 }
 
-func (cm *containerManagerImpl) UnprepareDynamicResources(*v1.Pod) error {
+func (cm *containerManagerImpl) UnprepareDynamicResources(ctx context.Context, pod *v1.Pod) error {
 	return nil
 }

pkg/kubelet/cm/dra/manager.go

Lines changed: 21 additions & 23 deletions
@@ -68,8 +68,6 @@ type ManagerImpl struct {
 
 // NewManagerImpl creates a new manager.
 func NewManagerImpl(kubeClient clientset.Interface, stateFileDirectory string, nodeName types.NodeName) (*ManagerImpl, error) {
-	klog.V(2).InfoS("Creating DRA manager")
-
 	claimInfoCache, err := newClaimInfoCache(stateFileDirectory, draManagerStateFileName)
 	if err != nil {
 		return nil, fmt.Errorf("failed to create claimInfo cache: %+v", err)
@@ -91,15 +89,16 @@ func NewManagerImpl(kubeClient clientset.Interface, stateFileDirectory string, n
 }
 
 // Start starts the reconcile loop of the manager.
-func (m *ManagerImpl) Start(activePods ActivePodsFunc, sourcesReady config.SourcesReady) error {
+func (m *ManagerImpl) Start(ctx context.Context, activePods ActivePodsFunc, sourcesReady config.SourcesReady) error {
 	m.activePods = activePods
 	m.sourcesReady = sourcesReady
-	go wait.Until(func() { m.reconcileLoop() }, m.reconcilePeriod, wait.NeverStop)
+	go wait.UntilWithContext(ctx, func(ctx context.Context) { m.reconcileLoop(ctx) }, m.reconcilePeriod)
 	return nil
 }
 
 // reconcileLoop ensures that any stale state in the manager's claimInfoCache gets periodically reconciled.
-func (m *ManagerImpl) reconcileLoop() {
+func (m *ManagerImpl) reconcileLoop(ctx context.Context) {
+	logger := klog.FromContext(ctx)
 	// Only once all sources are ready do we attempt to reconcile.
 	// This ensures that the call to m.activePods() below will succeed with
 	// the actual active pods list.
@@ -140,8 +139,8 @@ func (m *ManagerImpl) reconcileLoop() {
 
 	// Loop through all inactive pods and call UnprepareResources on them.
 	for _, podClaims := range inactivePodClaims {
-		if err := m.unprepareResources(podClaims.uid, podClaims.namespace, podClaims.claimNames); err != nil {
-			klog.ErrorS(err, "Unpreparing pod resources in reconcile loop", "podUID", podClaims.uid)
+		if err := m.unprepareResources(ctx, podClaims.uid, podClaims.namespace, podClaims.claimNames); err != nil {
+			logger.Info("Unpreparing pod resources in reconcile loop failed, will retry", "podUID", podClaims.uid, "err", err)
 		}
 	}
 }
@@ -150,25 +149,26 @@
 // for the input container, issue NodePrepareResources rpc requests
 // for each new resource requirement, process their responses and update the cached
 // containerResources on success.
-func (m *ManagerImpl) PrepareResources(pod *v1.Pod) error {
+func (m *ManagerImpl) PrepareResources(ctx context.Context, pod *v1.Pod) error {
+	logger := klog.FromContext(ctx)
 	batches := make(map[string][]*drapb.Claim)
 	resourceClaims := make(map[types.UID]*resourceapi.ResourceClaim)
 	for i := range pod.Spec.ResourceClaims {
 		podClaim := &pod.Spec.ResourceClaims[i]
-		klog.V(3).InfoS("Processing resource", "pod", klog.KObj(pod), "podClaim", podClaim.Name)
+		logger.V(3).Info("Processing resource", "pod", klog.KObj(pod), "podClaim", podClaim.Name)
		claimName, mustCheckOwner, err := resourceclaim.Name(pod, podClaim)
 		if err != nil {
 			return fmt.Errorf("prepare resource claim: %v", err)
 		}
 
 		if claimName == nil {
 			// Nothing to do.
-			klog.V(5).InfoS("No need to prepare resources, no claim generated", "pod", klog.KObj(pod), "podClaim", podClaim.Name)
+			logger.V(5).Info("No need to prepare resources, no claim generated", "pod", klog.KObj(pod), "podClaim", podClaim.Name)
 			continue
 		}
 		// Query claim object from the API server
 		resourceClaim, err := m.kubeClient.ResourceV1alpha3().ResourceClaims(pod.Namespace).Get(
-			context.TODO(),
+			ctx,
 			*claimName,
 			metav1.GetOptions{})
 		if err != nil {
@@ -198,9 +198,9 @@ func (m *ManagerImpl) PrepareResources(pod *v1.Pod) error {
 				return fmt.Errorf("claim %s: %w", klog.KObj(resourceClaim), err)
 			}
 			claimInfo = m.cache.add(ci)
-			klog.V(6).InfoS("Created new claim info cache entry", "pod", klog.KObj(pod), "podClaim", podClaim.Name, "claim", klog.KObj(resourceClaim), "claimInfoEntry", claimInfo)
+			logger.V(6).Info("Created new claim info cache entry", "pod", klog.KObj(pod), "podClaim", podClaim.Name, "claim", klog.KObj(resourceClaim), "claimInfoEntry", claimInfo)
 		} else {
-			klog.V(6).InfoS("Found existing claim info cache entry", "pod", klog.KObj(pod), "podClaim", podClaim.Name, "claim", klog.KObj(resourceClaim), "claimInfoEntry", claimInfo)
+			logger.V(6).Info("Found existing claim info cache entry", "pod", klog.KObj(pod), "podClaim", podClaim.Name, "claim", klog.KObj(resourceClaim), "claimInfoEntry", claimInfo)
 		}
 
 		// Add a reference to the current pod in the claim info.
@@ -216,7 +216,7 @@ func (m *ManagerImpl) PrepareResources(pod *v1.Pod) error {
 
 		// If this claim is already prepared, there is no need to prepare it again.
 		if claimInfo.isPrepared() {
-			klog.V(5).InfoS("Resources already prepared", "pod", klog.KObj(pod), "podClaim", podClaim.Name, "claim", klog.KObj(resourceClaim))
+			logger.V(5).Info("Resources already prepared", "pod", klog.KObj(pod), "podClaim", podClaim.Name, "claim", klog.KObj(resourceClaim))
 			return nil
 		}
 
@@ -250,7 +250,7 @@ func (m *ManagerImpl) PrepareResources(pod *v1.Pod) error {
 		if err != nil {
 			return fmt.Errorf("failed to get gRPC client for driver %s: %w", driverName, err)
 		}
-		response, err := client.NodePrepareResources(context.Background(), &drapb.NodePrepareResourcesRequest{Claims: claims})
+		response, err := client.NodePrepareResources(ctx, &drapb.NodePrepareResourcesRequest{Claims: claims})
 		if err != nil {
 			// General error unrelated to any particular claim.
 			return fmt.Errorf("NodePrepareResources failed: %w", err)
@@ -338,7 +338,6 @@ func (m *ManagerImpl) GetResources(pod *v1.Pod, container *v1.Container) (*Conta
 		// was generated for the referenced claim. There are valid use
 		// cases when this might happen, so we simply skip it.
 		if claimName == nil {
-			klog.V(5).InfoS("No CDI devices, no claim generated", "pod", klog.KObj(pod), "podClaimName", podClaim.Name)
 			continue
 		}
 		for _, claim := range container.Resources.Claims {
@@ -362,16 +361,14 @@
 			}
 		}
 	}
-
-	klog.V(5).InfoS("Determined CDI devices for pod", "pod", klog.KObj(pod), "cdiDevices", cdiDevices)
 	return &ContainerInfo{CDIDevices: cdiDevices}, nil
 }
 
 // UnprepareResources calls a driver's NodeUnprepareResource API for each resource claim owned by a pod.
 // This function is idempotent and may be called multiple times against the same pod.
 // As such, calls to the underlying NodeUnprepareResource API are skipped for claims that have
 // already been successfully unprepared.
-func (m *ManagerImpl) UnprepareResources(pod *v1.Pod) error {
+func (m *ManagerImpl) UnprepareResources(ctx context.Context, pod *v1.Pod) error {
 	var claimNames []string
 	for i := range pod.Spec.ResourceClaims {
 		claimName, _, err := resourceclaim.Name(pod, &pod.Spec.ResourceClaims[i])
@@ -386,10 +383,11 @@ func (m *ManagerImpl) UnprepareResources(pod *v1.Pod) error {
 		}
 		claimNames = append(claimNames, *claimName)
 	}
-	return m.unprepareResources(pod.UID, pod.Namespace, claimNames)
+	return m.unprepareResources(ctx, pod.UID, pod.Namespace, claimNames)
 }
 
-func (m *ManagerImpl) unprepareResources(podUID types.UID, namespace string, claimNames []string) error {
+func (m *ManagerImpl) unprepareResources(ctx context.Context, podUID types.UID, namespace string, claimNames []string) error {
+	logger := klog.FromContext(ctx)
 	batches := make(map[string][]*drapb.Claim)
 	claimNamesMap := make(map[types.UID]string)
 	for _, claimName := range claimNames {
@@ -445,7 +443,7 @@ func (m *ManagerImpl) unprepareResources(podUID types.UID, namespace string, cla
 		if err != nil {
 			return fmt.Errorf("get gRPC client for DRA driver %s: %w", driverName, err)
 		}
-		response, err := client.NodeUnprepareResources(context.Background(), &drapb.NodeUnprepareResourcesRequest{Claims: claims})
+		response, err := client.NodeUnprepareResources(ctx, &drapb.NodeUnprepareResourcesRequest{Claims: claims})
 		if err != nil {
 			// General error unrelated to any particular claim.
 			return fmt.Errorf("NodeUnprepareResources failed: %w", err)
@@ -473,7 +471,7 @@ func (m *ManagerImpl) unprepareResources(podUID types.UID, namespace string, cla
 	for _, claimName := range claimNamesMap {
 		claimInfo, _ := m.cache.get(claimName, namespace)
 		m.cache.delete(claimName, namespace)
-		klog.V(6).InfoS("Deleted claim info cache entry", "claim", klog.KRef(namespace, claimName), "claimInfoEntry", claimInfo)
+		logger.V(6).Info("Deleted claim info cache entry", "claim", klog.KRef(namespace, claimName), "claimInfoEntry", claimInfo)
 	}
 
 	// Atomically sync the cache back to the checkpoint.
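The reconcile loop switches from wait.Until with wait.NeverStop to wait.UntilWithContext, so cancelling the kubelet's context now stops the loop and each iteration logs through the context's logger. A standalone sketch of that pattern (the reconcile callback is hypothetical):

```go
package dra

import (
	"context"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/klog/v2"
)

// runReconcileLoop mirrors the pattern above: wait.UntilWithContext passes the
// context into every iteration, the iteration derives its logger from it, and
// the loop exits once the context is cancelled.
func runReconcileLoop(ctx context.Context, period time.Duration, reconcile func(context.Context) error) {
	wait.UntilWithContext(ctx, func(ctx context.Context) {
		logger := klog.FromContext(ctx)
		if err := reconcile(ctx); err != nil {
			// Log and retry on the next tick rather than aborting the loop.
			logger.Info("Reconcile failed, will retry", "err", err)
		}
	}, period)
}
```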
