
Commit 8f981cc

chore: add more contextual logging info for plugins
1 parent: 8b7cc50


18 files changed: 141 additions & 119 deletions

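The diffs shown below all follow the same two-step pattern: the constructor stores a logger tagged with a "plugin" key, and each extension point re-derives a logger from that stored one, adding an "ExtensionPoint" key. The following standalone Go sketch shows the shape of the change using only k8s.io/klog/v2; the ExamplePlugin type, its Score method, and all names are illustrative, not part of this commit.

package main

import (
	"context"

	"k8s.io/klog/v2"
)

const Name = "ExamplePlugin"

// ExamplePlugin keeps the plugin-scoped logger created at construction time,
// mirroring the new logger field added to each plugin struct below.
type ExamplePlugin struct {
	logger klog.Logger
}

// New mirrors the constructor changes: the logger taken from the constructor
// context is tagged with the plugin name and kept on the struct.
func New(ctx context.Context) *ExamplePlugin {
	lh := klog.FromContext(ctx).WithValues("plugin", Name)
	return &ExamplePlugin{logger: lh}
}

// Score mirrors the per-extension-point changes: the stored logger is pushed
// into the request context and read back, then an "ExtensionPoint" key is
// added, so every log line carries both keys.
func (p *ExamplePlugin) Score(ctx context.Context, nodeName string) {
	logger := klog.FromContext(klog.NewContext(ctx, p.logger)).WithValues("ExtensionPoint", "Score")
	// V(4) output is only visible when klog verbosity is raised (e.g. -v=4).
	logger.V(4).Info("scoring node", "node", nodeName)
}

func main() {
	ctx := klog.NewContext(context.Background(), klog.Background())
	p := New(ctx)
	p.Score(ctx, "node-a")
}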

pkg/capacityscheduling/capacity_scheduling.go

Lines changed: 14 additions & 10 deletions
@@ -58,6 +58,7 @@ func init() {
 // CapacityScheduling is a plugin that implements the mechanism of capacity scheduling.
 type CapacityScheduling struct {
 	sync.RWMutex
+	logger            klog.Logger
 	fh                framework.Handle
 	podLister         corelisters.PodLister
 	pdbLister         policylisters.PodDisruptionBudgetLister
@@ -119,7 +120,9 @@ func (c *CapacityScheduling) Name() string {
 
 // New initializes a new plugin and returns it.
 func New(ctx context.Context, obj runtime.Object, handle framework.Handle) (framework.Plugin, error) {
+	lh := klog.FromContext(ctx).WithValues("plugin", Name)
 	c := &CapacityScheduling{
+		logger:            lh,
 		fh:                handle,
 		elasticQuotaInfos: NewElasticQuotaInfos(),
 		podLister:         handle.SharedInformerFactory().Core().V1().Pods().Lister(),
@@ -289,7 +292,7 @@ func (c *CapacityScheduling) PreFilterExtensions() framework.PreFilterExtensions
 
 // AddPod from pre-computed data in cycleState.
 func (c *CapacityScheduling) AddPod(ctx context.Context, cycleState *framework.CycleState, podToSchedule *v1.Pod, podToAdd *framework.PodInfo, nodeInfo *framework.NodeInfo) *framework.Status {
-	logger := klog.FromContext(ctx)
+	logger := klog.FromContext(klog.NewContext(ctx, c.logger))
 
 	elasticQuotaSnapshotState, err := getElasticQuotaSnapshotState(cycleState)
 	if err != nil {
@@ -310,7 +313,7 @@ func (c *CapacityScheduling) AddPod(ctx context.Context, cycleState *framework.C
 
 // RemovePod from pre-computed data in cycleState.
 func (c *CapacityScheduling) RemovePod(ctx context.Context, cycleState *framework.CycleState, podToSchedule *v1.Pod, podToRemove *framework.PodInfo, nodeInfo *framework.NodeInfo) *framework.Status {
-	logger := klog.FromContext(ctx)
+	logger := klog.FromContext(klog.NewContext(ctx, c.logger))
 
 	elasticQuotaSnapshotState, err := getElasticQuotaSnapshotState(cycleState)
 	if err != nil {
@@ -341,8 +344,9 @@ func (c *CapacityScheduling) PostFilter(ctx context.Context, state *framework.Cy
 		PdbLister: c.pdbLister,
 		State:     state,
 		Interface: &preemptor{
-			fh:    c.fh,
-			state: state,
+			logger: c.logger,
+			fh:     c.fh,
+			state:  state,
 		},
 	}
 
@@ -352,8 +356,7 @@ func (c *CapacityScheduling) PostFilter(ctx context.Context, state *framework.Cy
 func (c *CapacityScheduling) Reserve(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeName string) *framework.Status {
 	c.Lock()
 	defer c.Unlock()
-
-	logger := klog.FromContext(ctx)
+	logger := klog.FromContext(klog.NewContext(ctx, c.logger)).WithValues("ExtensionPoint", "Reserve")
 
 	elasticQuotaInfo := c.elasticQuotaInfos[pod.Namespace]
 	if elasticQuotaInfo != nil {
@@ -382,8 +385,9 @@ func (c *CapacityScheduling) Unreserve(ctx context.Context, state *framework.Cyc
 }
 
 type preemptor struct {
-	fh    framework.Handle
-	state *framework.CycleState
+	logger klog.Logger
+	fh     framework.Handle
+	state  *framework.CycleState
 }
 
 func (p *preemptor) OrderedScoreFuncs(ctx context.Context, nodesToVictims map[string]*extenderv1.Victims) []func(node string) int64 {
@@ -409,7 +413,7 @@ func (p *preemptor) CandidatesToVictimsMap(candidates []preemption.Candidate) ma
 // We look at the node that is nominated for this pod and as long as there are
 // terminating pods on the node, we don't consider this for preempting more pods.
 func (p *preemptor) PodEligibleToPreemptOthers(pod *v1.Pod, nominatedNodeStatus *framework.Status) (bool, string) {
-	logger := klog.FromContext(context.TODO())
+	logger := p.logger
 
 	if pod.Spec.PreemptionPolicy != nil && *pod.Spec.PreemptionPolicy == v1.PreemptNever {
 		logger.V(5).Info("Pod is not eligible for preemption because of its preemptionPolicy", "pod", klog.KObj(pod), "preemptionPolicy", v1.PreemptNever)
@@ -492,7 +496,7 @@ func (p *preemptor) SelectVictimsOnNode(
 	nodeInfo *framework.NodeInfo,
 	pdbs []*policy.PodDisruptionBudget) ([]*v1.Pod, int, *framework.Status) {
 
-	logger := klog.FromContext(ctx)
+	logger := p.logger
 
 	elasticQuotaSnapshotState, err := getElasticQuotaSnapshotState(state)
 	if err != nil {
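The preemptor helper above gains its own logger field because PodEligibleToPreemptOthers and SelectVictimsOnNode previously fell back to klog.FromContext(context.TODO()) or the per-call context; with the plugin logger handed down at construction, their log lines keep the "plugin" key even where no context is available. A hedged sketch of that shape follows; victimSelector and its method are illustrative stand-ins, not the real preemptor API.

package main

import "k8s.io/klog/v2"

// victimSelector mirrors the preemptor struct: some of its methods receive no
// context, so it keeps the plugin-scoped logger it was constructed with.
type victimSelector struct {
	logger klog.Logger
}

// eligible mirrors PodEligibleToPreemptOthers: with no ctx parameter, it logs
// through the stored logger instead of klog.FromContext(context.TODO()).
func (v *victimSelector) eligible(podName string) bool {
	v.logger.V(5).Info("checking preemption eligibility", "pod", podName)
	return true
}

func main() {
	pluginLogger := klog.Background().WithValues("plugin", "CapacityScheduling")
	v := &victimSelector{logger: pluginLogger}
	_ = v.eligible("demo-pod")
}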

pkg/coscheduling/coscheduling.go

Lines changed: 7 additions & 5 deletions
@@ -40,6 +40,7 @@ import (
 
 // Coscheduling is a plugin that schedules pods in a group.
 type Coscheduling struct {
+	logger           klog.Logger
 	frameworkHandler framework.Handle
 	pgMgr            core.Manager
 	scheduleTimeout  *time.Duration
@@ -62,7 +63,7 @@ const (
 // New initializes and returns a new Coscheduling plugin.
 func New(ctx context.Context, obj runtime.Object, handle framework.Handle) (framework.Plugin, error) {
 
-	lh := klog.FromContext(ctx)
+	lh := klog.FromContext(ctx).WithValues("plugin", Name)
 	lh.V(5).Info("creating new coscheduling plugin")
 
 	args, ok := obj.(*config.CoschedulingArgs)
@@ -91,6 +92,7 @@ func New(ctx context.Context, obj runtime.Object, handle framework.Handle) (fram
 		handle.SharedInformerFactory().Core().V1().Pods(),
 	)
 	plugin := &Coscheduling{
+		logger:           lh,
 		frameworkHandler: handle,
 		pgMgr:            pgMgr,
 		scheduleTimeout:  &scheduleTimeDuration,
@@ -144,7 +146,7 @@ func (cs *Coscheduling) Less(podInfo1, podInfo2 *framework.QueuedPodInfo) bool {
 // 1. Whether the PodGroup that the Pod belongs to is on the deny list.
 // 2. Whether the total number of pods in a PodGroup is less than its `minMember`.
 func (cs *Coscheduling) PreFilter(ctx context.Context, state *framework.CycleState, pod *v1.Pod) (*framework.PreFilterResult, *framework.Status) {
-	lh := klog.FromContext(ctx)
+	lh := klog.FromContext(klog.NewContext(ctx, cs.logger)).WithValues("ExtensionPoint", "PreFilter")
 	// If PreFilter fails, return framework.UnschedulableAndUnresolvable to avoid
 	// any preemption attempts.
 	if err := cs.pgMgr.PreFilter(ctx, pod); err != nil {
@@ -157,7 +159,7 @@ func (cs *Coscheduling) PreFilter(ctx context.Context, state *framework.CycleSta
 // PostFilter is used to reject a group of pods if a pod does not pass PreFilter or Filter.
 func (cs *Coscheduling) PostFilter(ctx context.Context, state *framework.CycleState, pod *v1.Pod,
 	filteredNodeStatusMap framework.NodeToStatusMap) (*framework.PostFilterResult, *framework.Status) {
-	lh := klog.FromContext(ctx)
+	lh := klog.FromContext(klog.NewContext(ctx, cs.logger)).WithValues("ExtensionPoint", "PostFilter")
 	pgName, pg := cs.pgMgr.GetPodGroup(ctx, pod)
 	if pg == nil {
 		lh.V(4).Info("Pod does not belong to any group", "pod", klog.KObj(pod))
@@ -210,7 +212,7 @@ func (cs *Coscheduling) PreFilterExtensions() framework.PreFilterExtensions {
 
 // Permit is the functions invoked by the framework at "Permit" extension point.
 func (cs *Coscheduling) Permit(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeName string) (*framework.Status, time.Duration) {
-	lh := klog.FromContext(ctx)
+	lh := klog.FromContext(klog.NewContext(ctx, cs.logger)).WithValues("ExtensionPoint", "Permit")
 	waitTime := *cs.scheduleTimeout
 	s := cs.pgMgr.Permit(ctx, state, pod)
 	var retStatus *framework.Status
@@ -251,7 +253,7 @@ func (cs *Coscheduling) Reserve(ctx context.Context, state *framework.CycleState
 
 // Unreserve rejects all other Pods in the PodGroup when one of the pods in the group times out.
 func (cs *Coscheduling) Unreserve(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeName string) {
-	lh := klog.FromContext(ctx)
+	lh := klog.FromContext(klog.NewContext(ctx, cs.logger)).WithValues("ExtensionPoint", "Unreserve")
 	pgName, pg := cs.pgMgr.GetPodGroup(ctx, pod)
 	if pg == nil {
 		return
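All four extension points in this file use the same derivation: klog.FromContext(klog.NewContext(ctx, cs.logger)). Assuming klog's logr-based semantics, the logger read back from the derived context is the stored one, so the "plugin" value survives even when the incoming ctx carries no logger. A small standalone check of that round trip (names are illustrative):

package main

import (
	"context"

	"k8s.io/klog/v2"
)

func main() {
	// Stands in for the logger stored on the Coscheduling struct in New().
	stored := klog.Background().WithValues("plugin", "Coscheduling")

	// Incoming scheduling-cycle context, possibly without any logger attached.
	ctx := context.Background()

	// Same shape as the PreFilter/PostFilter/Permit/Unreserve changes above.
	lh := klog.FromContext(klog.NewContext(ctx, stored)).WithValues("ExtensionPoint", "PreFilter")
	lh.V(4).Info("log line carrying both the plugin and ExtensionPoint keys")
}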

pkg/networkaware/networkoverhead/networkoverhead.go

Lines changed: 20 additions & 21 deletions
@@ -73,7 +73,7 @@ func init() {
 // NetworkOverhead : Filter and Score nodes based on Pod's AppGroup requirements: MaxNetworkCosts requirements among Pods with dependencies
 type NetworkOverhead struct {
 	client.Client
-
+	logger     klog.Logger
 	podLister  corelisters.PodLister
 	handle     framework.Handle
 	namespaces []string
@@ -140,7 +140,7 @@ func (no *NetworkOverhead) ScoreExtensions() framework.ScoreExtensions {
 
 // New : create an instance of a NetworkOverhead plugin
 func New(ctx context.Context, obj runtime.Object, handle framework.Handle) (framework.Plugin, error) {
-	logger := klog.FromContext(ctx)
+	logger := klog.FromContext(ctx).WithValues("plugin", Name)
 	logger.V(4).Info("Creating new instance of the NetworkOverhead plugin")
 
 	args, err := getArgs(obj)
@@ -155,8 +155,8 @@
 	}
 
 	no := &NetworkOverhead{
-		Client: client,
-
+		Client:     client,
+		logger:     logger,
 		podLister:  handle.SharedInformerFactory().Core().V1().Pods().Lister(),
 		handle:     handle,
 		namespaces: args.Namespaces,
@@ -178,7 +178,7 @@ func (no *NetworkOverhead) PreFilter(ctx context.Context, state *framework.Cycle
 	preFilterState := &PreFilterState{
 		scoreEqually: true,
 	}
-	logger := klog.FromContext(ctx)
+	logger := klog.FromContext(klog.NewContext(ctx, no.logger)).WithValues("ExtensionPoint", "PreFilter")
 
 	// Write initial status
 	state.Write(preFilterStateKey, preFilterState)
@@ -190,10 +190,10 @@
 	}
 
 	// Get AppGroup CR
-	appGroup := no.findAppGroupNetworkOverhead(ctx, logger, agName)
+	appGroup := no.findAppGroupNetworkOverhead(ctx, agName)
 
 	// Get NetworkTopology CR
-	networkTopology := no.findNetworkTopologyNetworkOverhead(ctx, logger)
+	networkTopology := no.findNetworkTopologyNetworkOverhead(ctx)
 
 	// Sort Costs if manual weights were selected
 	no.sortNetworkTopologyCosts(networkTopology)
@@ -273,7 +273,7 @@
 	logger.V(6).Info("Number of dependencies", "satisfied", satisfied, "violated", violated)
 
 	// Get accumulated cost based on pod dependencies
-	cost, ok := no.getAccumulatedCost(logger, scheduledList, dependencyList, nodeInfo.Node().Name, region, zone, costMap)
+	cost, ok := no.getAccumulatedCost(scheduledList, dependencyList, nodeInfo.Node().Name, region, zone, costMap)
 	if ok != nil {
 		return nil, framework.NewStatus(framework.Error, fmt.Sprintf("getting pod hostname from Snapshot: %v", ok))
 	}
@@ -332,7 +332,7 @@ func (no *NetworkOverhead) Filter(ctx context.Context,
 	if nodeInfo.Node() == nil {
 		return framework.NewStatus(framework.Error, "node not found")
 	}
-	logger := klog.FromContext(ctx)
+	logger := klog.FromContext(klog.NewContext(ctx, no.logger)).WithValues("ExtensionPoint", "Filter")
 
 	// Get PreFilterState
 	preFilterState, err := getPreFilterState(cycleState)
@@ -367,7 +367,7 @@ func (no *NetworkOverhead) Score(ctx context.Context,
 	nodeName string) (int64, *framework.Status) {
 	score := framework.MinNodeScore
 
-	logger := klog.FromContext(ctx)
+	logger := klog.FromContext(klog.NewContext(ctx, no.logger)).WithValues("ExtensionPoint", "Score")
 	// Get PreFilterState
 	preFilterState, err := getPreFilterState(cycleState)
 	if err != nil {
@@ -391,7 +391,7 @@ func (no *NetworkOverhead) NormalizeScore(ctx context.Context,
 	state *framework.CycleState,
 	pod *corev1.Pod,
 	scores framework.NodeScoreList) *framework.Status {
-	logger := klog.FromContext(ctx)
+	logger := klog.FromContext(klog.NewContext(ctx, no.logger)).WithValues("ExtensionPoint", "NormalizeScore")
 	logger.V(4).Info("before normalization: ", "scores", scores)
 
 	// Get Min and Max Scores to normalize between framework.MaxNodeScore and framework.MinNodeScore
@@ -575,7 +575,6 @@ func checkMaxNetworkCostRequirements(
 
 // getAccumulatedCost : calculate the accumulated cost based on the Pod's dependencies
 func (no *NetworkOverhead) getAccumulatedCost(
-	logger klog.Logger,
 	scheduledList networkawareutil.ScheduledList,
 	dependencyList []agv1alpha1.DependenciesInfo,
 	nodeName string,
@@ -599,7 +598,7 @@
 		// Get NodeInfo from pod Hostname
 		podNodeInfo, err := no.handle.SnapshotSharedLister().NodeInfos().Get(podAllocated.Hostname)
 		if err != nil {
-			logger.Error(err, "getting pod hostname from Snapshot", "nodeInfo", podNodeInfo)
+			no.logger.Error(err, "getting pod hostname from Snapshot", "nodeInfo", podNodeInfo)
			return cost, err
 		}
 		// Get zone and region from Pod Hostname
@@ -653,18 +652,18 @@ func getPreFilterState(cycleState *framework.CycleState) (*PreFilterState, error
 	return state, nil
 }
 
-func (no *NetworkOverhead) findAppGroupNetworkOverhead(ctx context.Context, logger klog.Logger, agName string) *agv1alpha1.AppGroup {
-	logger.V(6).Info("Debugging namespaces", "namespaces", no.namespaces)
+func (no *NetworkOverhead) findAppGroupNetworkOverhead(ctx context.Context, agName string) *agv1alpha1.AppGroup {
+	no.logger.V(6).Info("Debugging namespaces", "namespaces", no.namespaces)
 	for _, namespace := range no.namespaces {
-		logger.V(6).Info("appGroup CR", "namespace", namespace, "name", agName)
+		no.logger.V(6).Info("appGroup CR", "namespace", namespace, "name", agName)
 		// AppGroup could not be placed in several namespaces simultaneously
 		appGroup := &agv1alpha1.AppGroup{}
 		err := no.Get(ctx, client.ObjectKey{
 			Namespace: namespace,
 			Name:      agName,
 		}, appGroup)
 		if err != nil {
-			logger.V(4).Error(err, "Cannot get AppGroup from AppGroupNamespaceLister:")
+			no.logger.V(4).Error(err, "Cannot get AppGroup from AppGroupNamespaceLister:")
 			continue
 		}
 		if appGroup != nil && appGroup.GetUID() != "" {
@@ -674,18 +673,18 @@ func (no *NetworkOverhead) findAppGroupNetworkOverhead(ctx context.Context, logg
 		return nil
 }
 
-func (no *NetworkOverhead) findNetworkTopologyNetworkOverhead(ctx context.Context, logger klog.Logger) *ntv1alpha1.NetworkTopology {
-	logger.V(6).Info("Debugging namespaces", "namespaces", no.namespaces)
+func (no *NetworkOverhead) findNetworkTopologyNetworkOverhead(ctx context.Context) *ntv1alpha1.NetworkTopology {
+	no.logger.V(6).Info("Debugging namespaces", "namespaces", no.namespaces)
 	for _, namespace := range no.namespaces {
-		logger.V(6).Info("networkTopology CR:", "namespace", namespace, "name", no.ntName)
+		no.logger.V(6).Info("networkTopology CR:", "namespace", namespace, "name", no.ntName)
 		// NetworkTopology could not be placed in several namespaces simultaneously
 		networkTopology := &ntv1alpha1.NetworkTopology{}
 		err := no.Get(ctx, client.ObjectKey{
 			Namespace: namespace,
 			Name:      no.ntName,
 		}, networkTopology)
 		if err != nil {
-			logger.V(4).Error(err, "Cannot get networkTopology from networkTopologyNamespaceLister:")
+			no.logger.V(4).Error(err, "Cannot get networkTopology from networkTopologyNamespaceLister:")
 			continue
 		}
 		if networkTopology != nil && networkTopology.GetUID() != "" {
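Beyond the per-extension-point loggers, the helpers in this file (findAppGroupNetworkOverhead, findNetworkTopologyNetworkOverhead, getAccumulatedCost) drop their explicit logger parameter and log through the struct field instead, so callers no longer thread a logger down the stack. A minimal sketch of that refactor in isolation; the lookup type and find method are illustrative, not the plugin's API.

package main

import "k8s.io/klog/v2"

// lookup stands in for NetworkOverhead: helpers no longer take a logger
// argument; they use the plugin-scoped logger stored on the receiver.
type lookup struct {
	logger     klog.Logger
	namespaces []string
}

// find mirrors findAppGroupNetworkOverhead after the change: every log call
// goes through l.logger, which already carries the "plugin" key.
func (l *lookup) find(name string) {
	l.logger.V(6).Info("Debugging namespaces", "namespaces", l.namespaces)
	for _, ns := range l.namespaces {
		l.logger.V(6).Info("appGroup CR", "namespace", ns, "name", name)
	}
}

func main() {
	l := &lookup{
		logger:     klog.Background().WithValues("plugin", "NetworkOverhead"),
		namespaces: []string{"default"},
	}
	l.find("demo-app-group")
}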

pkg/networkaware/topologicalsort/topologicalsort.go

Lines changed: 7 additions & 4 deletions
@@ -50,6 +50,7 @@ func init() {
 // TopologicalSort : Sort pods based on their AppGroup and corresponding microservice dependencies
 type TopologicalSort struct {
 	client.Client
+	logger     klog.Logger
 	handle     framework.Handle
 	namespaces []string
 }
@@ -73,7 +74,7 @@ func getArgs(obj runtime.Object) (*pluginconfig.TopologicalSortArgs, error) {
 
 // New : create an instance of a TopologicalSort plugin
 func New(ctx context.Context, obj runtime.Object, handle framework.Handle) (framework.Plugin, error) {
-	logger := klog.FromContext(ctx)
+	logger := klog.FromContext(ctx).WithValues("plugin", Name)
 	logger.V(4).Info("Creating new instance of the TopologicalSort plugin")
 
 	args, err := getArgs(obj)
@@ -90,6 +91,7 @@ func New(ctx context.Context, obj runtime.Object, handle framework.Handle) (fram
 
 	pl := &TopologicalSort{
 		Client:     client,
+		logger:     logger,
 		handle:     handle,
 		namespaces: args.Namespaces,
 	}
@@ -103,7 +105,7 @@ func (ts *TopologicalSort) Less(pInfo1, pInfo2 *framework.QueuedPodInfo) bool {
 	p1AppGroup := networkawareutil.GetPodAppGroupLabel(pInfo1.Pod)
 	p2AppGroup := networkawareutil.GetPodAppGroupLabel(pInfo2.Pod)
 	ctx := context.TODO()
-	logger := klog.FromContext(ctx)
+	logger := ts.logger.WithValues("ExtensionPoint", "Less")
 
 	// If pods do not belong to an AppGroup, or being to different AppGroups, follow vanilla QoS Sort
 	if p1AppGroup != p2AppGroup || len(p1AppGroup) == 0 {
@@ -115,7 +117,7 @@
 	// Pods belong to the same appGroup, get the CR
 	logger.V(6).Info("Pods belong to the same AppGroup CR", "p1 name", pInfo1.Pod.Name, "p2 name", pInfo2.Pod.Name, "appGroup", p1AppGroup)
 	agName := p1AppGroup
-	appGroup := ts.findAppGroupTopologicalSort(ctx, logger, agName)
+	appGroup := ts.findAppGroupTopologicalSort(ctx, agName)
 
 	// Get labels from both pods
 	labelsP1 := pInfo1.Pod.GetLabels()
@@ -131,7 +133,8 @@
 	return orderP1 <= orderP2
 }
 
-func (ts *TopologicalSort) findAppGroupTopologicalSort(ctx context.Context, logger klog.Logger, agName string) *agv1alpha.AppGroup {
+func (ts *TopologicalSort) findAppGroupTopologicalSort(ctx context.Context, agName string) *agv1alpha.AppGroup {
+	logger := ts.logger
 	for _, namespace := range ts.namespaces {
 		logger.V(6).Info("appGroup CR", "namespace", namespace, "name", agName)
 		// AppGroup couldn't be placed in several namespaces simultaneously
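The queue-sort Less callback receives no caller-supplied context (the file only builds a context.TODO() for the API read), so its logger is now derived directly from the stored ts.logger rather than from a context. A minimal sketch of that case; the sorter type and its Less method are illustrative stand-ins.

package main

import "k8s.io/klog/v2"

// sorter stands in for TopologicalSort: Less gets no context from the
// scheduling queue, so it can only derive its logger from the stored one.
type sorter struct {
	logger klog.Logger
}

func (s *sorter) Less(p1, p2 string) bool {
	logger := s.logger.WithValues("ExtensionPoint", "Less")
	logger.V(6).Info("comparing pods", "p1", p1, "p2", p2)
	return p1 <= p2
}

func main() {
	s := &sorter{logger: klog.Background().WithValues("plugin", "TopologicalSort")}
	_ = s.Less("pod-a", "pod-b")
}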

pkg/noderesources/allocatable.go

Lines changed: 4 additions & 2 deletions
@@ -33,6 +33,7 @@ import (
 // Allocatable is a score plugin that favors nodes based on their allocatable
 // resources.
 type Allocatable struct {
+	logger klog.Logger
 	handle framework.Handle
 	resourceAllocationScorer
 }
@@ -59,7 +60,7 @@ func validateResources(resources []schedulerconfig.ResourceSpec) error {
 
 // Score invoked at the score extension point.
 func (alloc *Allocatable) Score(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeName string) (int64, *framework.Status) {
-	logger := klog.FromContext(ctx)
+	logger := klog.FromContext(klog.NewContext(ctx, alloc.logger)).WithValues("ExtensionPoint", "Score")
 	nodeInfo, err := alloc.handle.SnapshotSharedLister().NodeInfos().Get(nodeName)
 	if err != nil {
 		return 0, framework.NewStatus(framework.Error, fmt.Sprintf("getting node %q from Snapshot: %v", nodeName, err))
@@ -79,7 +80,7 @@ func (alloc *Allocatable) ScoreExtensions() framework.ScoreExtensions {
 
 // NewAllocatable initializes a new plugin and returns it.
 func NewAllocatable(ctx context.Context, allocArgs runtime.Object, h framework.Handle) (framework.Plugin, error) {
-	logger := klog.FromContext(ctx)
+	logger := klog.FromContext(ctx).WithValues("plugin", AllocatableName)
 	// Start with default values.
 	mode := config.Least
 	resToWeightMap := defaultResourcesToWeightMap
@@ -110,6 +111,7 @@ func NewAllocatable(ctx context.Context, allocArgs runtime.Object, h framework.H
 	}
 
 	return &Allocatable{
+		logger: logger,
 		handle: h,
 		resourceAllocationScorer: resourceAllocationScorer{
 			Name:                AllocatableName,

pkg/noderesourcetopology/filter.go

Lines changed: 2 additions & 1 deletion
@@ -191,7 +191,8 @@ func (tm *TopologyMatch) Filter(ctx context.Context, cycleState *framework.Cycle
 
 	nodeName := nodeInfo.Node().Name
 
-	lh := klog.FromContext(ctx).WithValues(logging.KeyPod, klog.KObj(pod), logging.KeyPodUID, logging.PodUID(pod), logging.KeyNode, nodeName)
+	lh := klog.FromContext(klog.NewContext(ctx, tm.logger)).WithValues("ExtensionPoint", "Filter").
+		WithValues(logging.KeyPod, klog.KObj(pod), logging.KeyPodUID, logging.PodUID(pod), logging.KeyNode, nodeName)
 
 	lh.V(4).Info(logging.FlowBegin)
 	defer lh.V(4).Info(logging.FlowEnd)
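Here the contextual keys are stacked: the stored plugin logger, then an "ExtensionPoint" key, then the pod and node keys this file already attached through its logging helpers. A hedged sketch of the same stacking with plain key names; the logging.Key* constants and logging.PodUID helper are specific to this package and not reproduced, and the plugin name value is illustrative.

package main

import (
	"context"

	"k8s.io/klog/v2"
)

func main() {
	// Stands in for the logger stored on the TopologyMatch struct.
	pluginLogger := klog.Background().WithValues("plugin", "NodeResourceTopologyMatch")
	ctx := context.Background()

	// Mirrors the Filter change: plugin key from the stored logger,
	// ExtensionPoint added per call, then per-request pod and node keys.
	lh := klog.FromContext(klog.NewContext(ctx, pluginLogger)).WithValues("ExtensionPoint", "Filter").
		WithValues("pod", klog.KRef("default", "demo-pod"), "node", "node-a")

	lh.V(4).Info("filter flow begin")
	lh.V(4).Info("filter flow end")
}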
