
Commit 28acbc0

Merge pull request #823 from googs1025/feature/contextual_logging_part2
feature: use contextual logging for integration
2 parents: 0a14853 + 9f5d626

21 files changed: +128 -131 lines
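This PR continues the migration of these plugins and their integration tests to contextual logging: loggers are taken from a context.Context (or passed explicitly) instead of calling the klog package-level functions. Below is a minimal sketch of the retrieval pattern the diffs rely on; the function name is illustrative, not from this repository:

```go
package main

import (
	"context"

	"k8s.io/klog/v2"
)

// scheduleOne stands in for any callee: it recovers the logger the caller
// attached to ctx and logs through it, not through the klog globals.
func scheduleOne(ctx context.Context) {
	logger := klog.FromContext(ctx)
	logger.V(4).Info("scheduling cycle started", "plugin", "example")
}

func main() {
	// The caller decides which logger flows down the call chain.
	logger := klog.Background().WithValues("component", "demo")
	ctx := klog.NewContext(context.Background(), logger)
	scheduleOne(ctx)
}
```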

pkg/capacityscheduling/capacity_scheduling.go

Lines changed: 1 addition & 1 deletion
```diff
@@ -418,7 +418,7 @@ func (p *preemptor) PodEligibleToPreemptOthers(pod *v1.Pod, nominatedNodeStatus
 
 	preFilterState, err := getPreFilterState(p.state)
 	if err != nil {
-		logger.V(5).Info("Failed to read preFilterState from cycleState, err: %s", err, "preFilterStateKey", preFilterStateKey)
+		logger.V(5).Error(err, "Failed to read preFilterState from cycleState", "preFilterStateKey", preFilterStateKey)
 		return false, "not eligible due to failed to read from cycleState"
 	}
 
```
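The fix above is the recurring theme of this PR: logr-style loggers treat the message as a constant string, not a printf format, and Error takes the error as its own first argument. A standalone sketch of the before/after behavior, with klog.Background() standing in for the plugin's contextual logger:

```go
package main

import (
	"errors"

	"k8s.io/klog/v2"
)

func main() {
	logger := klog.Background() // stand-in for a logger from klog.FromContext(ctx)
	err := errors.New("no prefilter state")

	// Before-style call: "%s" is printed literally, and err is appended to
	// the key/value list without a key, leaving it unpaired.
	logger.V(5).Info("failed to read state, err: %s", err, "stateKey", "preFilter")

	// After-style call: a constant message, the error in its dedicated
	// field, and alternating key/value pairs.
	logger.V(5).Error(err, "failed to read state", "stateKey", "preFilter")
}
```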
pkg/networkaware/networkoverhead/networkoverhead.go

Lines changed: 4 additions & 4 deletions
```diff
@@ -528,7 +528,7 @@ func checkMaxNetworkCostRequirements(
 		// If Nodes are not the same, get NodeInfo from pod Hostname
 		podNodeInfo, err := no.handle.SnapshotSharedLister().NodeInfos().Get(podAllocated.Hostname)
 		if err != nil {
-			logger.Error(nil, "getting pod nodeInfo %q from Snapshot: %v", podNodeInfo, err)
+			logger.Error(err, "getting pod's NodeInfo from snapshot", "nodeInfo", podNodeInfo)
 			return satisfied, violated, err
 		}
 
@@ -599,7 +599,7 @@ func (no *NetworkOverhead) getAccumulatedCost(
 	// Get NodeInfo from pod Hostname
 	podNodeInfo, err := no.handle.SnapshotSharedLister().NodeInfos().Get(podAllocated.Hostname)
 	if err != nil {
-		logger.Error(nil, "getting pod hostname %q from Snapshot: %v", podNodeInfo, err)
+		logger.Error(err, "getting pod hostname from Snapshot", "nodeInfo", podNodeInfo)
 		return cost, err
 	}
 	// Get zone and region from Pod Hostname
@@ -654,7 +654,7 @@ func getPreFilterState(cycleState *framework.CycleState) (*PreFilterState, error
 }
 
 func (no *NetworkOverhead) findAppGroupNetworkOverhead(ctx context.Context, logger klog.Logger, agName string) *agv1alpha1.AppGroup {
-	logger.V(6).Info("namespaces: %s", no.namespaces)
+	logger.V(6).Info("Debugging namespaces", "namespaces", no.namespaces)
 	for _, namespace := range no.namespaces {
 		logger.V(6).Info("appGroup CR", "namespace", namespace, "name", agName)
 		// AppGroup could not be placed in several namespaces simultaneously
@@ -675,7 +675,7 @@ func (no *NetworkOverhead) findAppGroupNetworkOverhead(ctx context.Context, logg
 }
 
 func (no *NetworkOverhead) findNetworkTopologyNetworkOverhead(ctx context.Context, logger klog.Logger) *ntv1alpha1.NetworkTopology {
-	logger.V(6).Info("namespaces: %s", no.namespaces)
+	logger.V(6).Info("Debugging namespaces", "namespaces", no.namespaces)
 	for _, namespace := range no.namespaces {
 		logger.V(6).Info("networkTopology CR:", "namespace", namespace, "name", no.ntName)
 		// NetworkTopology could not be placed in several namespaces simultaneously
```

pkg/noderesources/allocatable.go

Lines changed: 7 additions & 6 deletions
```diff
@@ -78,7 +78,8 @@ func (alloc *Allocatable) ScoreExtensions() framework.ScoreExtensions {
 }
 
 // NewAllocatable initializes a new plugin and returns it.
-func NewAllocatable(_ context.Context, allocArgs runtime.Object, h framework.Handle) (framework.Plugin, error) {
+func NewAllocatable(ctx context.Context, allocArgs runtime.Object, h framework.Handle) (framework.Plugin, error) {
+	logger := klog.FromContext(ctx)
 	// Start with default values.
 	mode := config.Least
 	resToWeightMap := defaultResourcesToWeightMap
@@ -112,34 +113,34 @@ func NewAllocatable(_ context.Context, allocArgs runtime.Object, h framework.Han
 		handle: h,
 		resourceAllocationScorer: resourceAllocationScorer{
 			Name: AllocatableName,
-			scorer: resourceScorer(resToWeightMap, mode),
+			scorer: resourceScorer(logger, resToWeightMap, mode),
 			resourceToWeightMap: resToWeightMap,
 		},
 	}, nil
 }
 
-func resourceScorer(resToWeightMap resourceToWeightMap, mode config.ModeType) func(resourceToValueMap, resourceToValueMap) int64 {
+func resourceScorer(logger klog.Logger, resToWeightMap resourceToWeightMap, mode config.ModeType) func(resourceToValueMap, resourceToValueMap) int64 {
 	return func(requested, allocable resourceToValueMap) int64 {
 		// TODO: consider volumes in scoring.
 		var nodeScore, weightSum int64
 		for resource, weight := range resToWeightMap {
-			resourceScore := score(allocable[resource], mode)
+			resourceScore := score(logger, allocable[resource], mode)
 			nodeScore += resourceScore * weight
 			weightSum += weight
 		}
 		return nodeScore / weightSum
 	}
 }
 
-func score(capacity int64, mode config.ModeType) int64 {
+func score(logger klog.Logger, capacity int64, mode config.ModeType) int64 {
 	switch mode {
 	case config.Least:
 		return -1 * capacity
 	case config.Most:
 		return capacity
 	}
 
-	klog.V(10).InfoS("No match for mode", "mode", mode)
+	logger.V(10).Info("No match for mode", "mode", mode)
 	return 0
 }
 
```
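Worth noting in this diff: score() and resourceScorer() have no context of their own, so the logger obtained once in NewAllocatable travels down the call chain as an explicit parameter, with resourceScorer capturing it in the closure it returns. A stripped-down sketch of that plumbing, with simplified types and illustrative names:

```go
package main

import (
	"context"

	"k8s.io/klog/v2"
)

// score is a leaf helper: it cannot reach any context, so it receives the
// logger as a plain argument instead of calling the klog globals.
func score(logger klog.Logger, capacity int64, most bool) int64 {
	if most {
		return capacity
	}
	logger.V(10).Info("scoring by least allocatable", "capacity", capacity)
	return -1 * capacity
}

// newScorer captures the logger in the returned closure, the same way
// resourceScorer hands it on to score().
func newScorer(logger klog.Logger) func(int64) int64 {
	return func(capacity int64) int64 {
		return score(logger, capacity, false)
	}
}

func main() {
	// What NewAllocatable now does with the ctx the framework passes in.
	logger := klog.FromContext(context.Background())
	scorer := newScorer(logger)
	_ = scorer(4000)
}
```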

pkg/noderesourcetopology/cache/attr_watch.go

Lines changed: 2 additions & 2 deletions
```diff
@@ -18,7 +18,7 @@ package cache
 
 import (
 	"context"
-
+	"fmt"
 	"github.com/go-logr/logr"
 
 	"k8s.io/apimachinery/pkg/watch"
@@ -72,7 +72,7 @@ func (wt Watcher) ProcessEvent(ev watch.Event) bool {
 
 	nrtObj, ok := ev.Object.(*topologyv1alpha2.NodeResourceTopology)
 	if !ok {
-		wt.lh.Info("unexpected object %T", ev.Object)
+		wt.lh.Info("unexpected object", "kind", fmt.Sprintf("%T", ev.Object))
 		return false
 	}
 
```
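Because the message can no longer carry a %T verb, the dynamic type is rendered up front with fmt.Sprintf and attached as an ordinary value. A standalone sketch of the same idea:

```go
package main

import (
	"fmt"

	"k8s.io/klog/v2"
)

func main() {
	logger := klog.Background()
	var obj interface{} = 42 // stand-in for the unexpected watch-event object

	// Pre-render the type with %T and log it under a named key, keeping
	// the message itself constant and grep-friendly.
	logger.Info("unexpected object", "kind", fmt.Sprintf("%T", obj))
}
```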

pkg/trimaran/resourcestats.go

Lines changed: 1 addition & 1 deletion
```diff
@@ -211,7 +211,7 @@ func GetNodeRequestsAndLimits(logger klog.Logger, podInfosOnNode []*framework.Po
 	setMin(&nodeRequestMinusPod.MilliCPU, capCpu)
 	setMin(&nodeRequestMinusPod.Memory, capMem)
 
-	klog.V(6).InfoS("Total node resources:", "node", klog.KObj(node),
+	logger.V(6).Info("Total node resources:", "node", klog.KObj(node),
 		"CPU-req", nodeRequest.MilliCPU, "Memory-req", nodeRequest.Memory,
 		"CPU-limit", nodeLimit.MilliCPU, "Memory-limit", nodeLimit.Memory,
 		"CPU-cap", nodeCapacity.MilliCPU, "Memory-cap", nodeCapacity.Memory)
```

test/integration/allocatable_test.go

Lines changed: 1 addition & 1 deletion
```diff
@@ -135,7 +135,7 @@ func TestAllocatablePlugin(t *testing.T) {
 	for i := range pods {
 		// Wait for the pod to be scheduled.
 		err := wait.PollUntilContextTimeout(testCtx.Ctx, 1*time.Second, 60*time.Second, false, func(ctx context.Context) (bool, error) {
-			return podScheduled(cs, pods[i].Namespace, pods[i].Name), nil
+			return podScheduled(t, cs, pods[i].Namespace, pods[i].Name), nil
 		})
 		if err != nil {
 			t.Fatalf("Waiting for pod %q to be scheduled, error: %v", pods[i].Name, err.Error())
```
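The same signature change repeats across the remaining test files: podScheduled now takes the *testing.T so transient lookup errors are reported through t.Logf and attributed to the running test rather than the global klog stream. The helper's body isn't part of this diff; the sketch below is a plausible reconstruction, not the repository's actual code:

```go
package integration

import (
	"context"
	"testing"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// podScheduled (hypothetical reconstruction) reports whether the named pod
// has been bound to a node, logging lookup errors through the test.
func podScheduled(t *testing.T, cs kubernetes.Interface, namespace, name string) bool {
	pod, err := cs.CoreV1().Pods(namespace).Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		// t.Logf ties the message to this test's output.
		t.Logf("getting pod %s/%s: %v", namespace, name, err)
		return false
	}
	// A pod counts as scheduled once it has a node assigned.
	return pod.Spec.NodeName != ""
}
```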

test/integration/capacity_scheduling_test.go

Lines changed: 2 additions & 2 deletions
```diff
@@ -524,7 +524,7 @@ func TestCapacityScheduling(t *testing.T) {
 
 			if err := wait.PollUntilContextTimeout(testCtx.Ctx, time.Millisecond*200, 10*time.Second, false, func(ctx context.Context) (bool, error) {
 				for _, pod := range tt.existPods {
-					if !podScheduled(cs, pod.Namespace, pod.Name) {
+					if !podScheduled(t, cs, pod.Namespace, pod.Name) {
 						return false, nil
 					}
 				}
@@ -543,7 +543,7 @@ func TestCapacityScheduling(t *testing.T) {
 
 			if err := wait.PollUntilContextTimeout(testCtx.Ctx, time.Millisecond*200, 10*time.Second, false, func(ctx context.Context) (bool, error) {
 				for _, v := range tt.expectedPods {
-					if !podScheduled(cs, v.Namespace, v.Name) {
+					if !podScheduled(t, cs, v.Namespace, v.Name) {
 						return false, nil
 					}
 				}
```

test/integration/coscheduling_test.go

Lines changed: 4 additions & 5 deletions
```diff
@@ -29,7 +29,6 @@ import (
 	"k8s.io/apimachinery/pkg/util/uuid"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/client-go/kubernetes"
-	"k8s.io/klog/v2"
 	"k8s.io/kubernetes/pkg/scheduler"
 	schedapi "k8s.io/kubernetes/pkg/scheduler/apis/config"
 	fwkruntime "k8s.io/kubernetes/pkg/scheduler/framework/runtime"
@@ -356,14 +355,14 @@ func TestCoschedulingPlugin(t *testing.T) {
 			defer cleanupPods(t, testCtx, tt.pods)
 			// Create Pods, we will expect them to be scheduled in a reversed order.
 			for i := range tt.pods {
-				klog.InfoS("Creating pod ", "podName", tt.pods[i].Name)
+				t.Logf("Creating pod: %s", tt.pods[i].Name)
 				if _, err := cs.CoreV1().Pods(tt.pods[i].Namespace).Create(testCtx.Ctx, tt.pods[i], metav1.CreateOptions{}); err != nil {
 					t.Fatalf("Failed to create Pod %q: %v", tt.pods[i].Name, err)
 				}
 			}
 			err = wait.PollUntilContextTimeout(testCtx.Ctx, 1*time.Second, 120*time.Second, false, func(ctx context.Context) (bool, error) {
 				for _, v := range tt.expectedPods {
-					if !podScheduled(cs, ns, v) {
+					if !podScheduled(t, cs, ns, v) {
 						return false, nil
 					}
 				}
@@ -534,14 +533,14 @@ func TestPodgroupBackoff(t *testing.T) {
 			defer cleanupPods(t, testCtx, tt.pods)
 			// Create Pods, we will expect them to be scheduled in a reversed order.
 			for i := range tt.pods {
-				klog.InfoS("Creating pod ", "podName", tt.pods[i].Name)
+				t.Logf("Creating pod %s", tt.pods[i].Name)
 				if _, err := cs.CoreV1().Pods(tt.pods[i].Namespace).Create(testCtx.Ctx, tt.pods[i], metav1.CreateOptions{}); err != nil {
 					t.Fatalf("Failed to create Pod %q: %v", tt.pods[i].Name, err)
 				}
 			}
 			err = wait.PollUntilContextTimeout(testCtx.Ctx, 1*time.Second, 120*time.Second, false, func(ctx context.Context) (bool, error) {
 				for _, v := range tt.expectedPods {
-					if !podScheduled(cs, ns, v) {
+					if !podScheduled(t, cs, ns, v) {
 						return false, nil
 					}
 				}
```

test/integration/elasticquota_controller_test.go

Lines changed: 4 additions & 5 deletions
```diff
@@ -31,7 +31,6 @@ import (
 	quota "k8s.io/apiserver/pkg/quota/v1"
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/kubernetes/scheme"
-	"k8s.io/klog/v2"
 	"k8s.io/kubernetes/pkg/scheduler"
 	fwkruntime "k8s.io/kubernetes/pkg/scheduler/framework/runtime"
 	st "k8s.io/kubernetes/pkg/scheduler/testing"
@@ -303,7 +302,7 @@ func TestElasticController(t *testing.T) {
 	}
 	if err := wait.PollUntilContextTimeout(testCtx.Ctx, time.Millisecond*200, 10*time.Second, false, func(ctx context.Context) (bool, error) {
 		for _, pod := range tt.incomingPods {
-			if !podScheduled(cs, pod.Namespace, pod.Name) {
+			if !podScheduled(t, cs, pod.Namespace, pod.Name) {
 				return false, nil
 			}
 		}
@@ -317,7 +316,7 @@ func TestElasticController(t *testing.T) {
 		var eq schedv1alpha1.ElasticQuota
 		if err := extClient.Get(ctx, types.NamespacedName{Namespace: v.Namespace, Name: v.Name}, &eq); err != nil {
 			// This could be a connection error so we want to retry.
-			klog.ErrorS(err, "Failed to obtain the elasticQuota clientSet")
+			t.Logf("Failed to obtain the elasticQuota clientSet: %s", err)
 			return false, err
 		}
 		if !quota.Equals(eq.Status.Used, v.Status.Used) {
@@ -337,7 +336,7 @@ func TestElasticController(t *testing.T) {
 	}
 	if err := wait.PollUntilContextTimeout(testCtx.Ctx, time.Millisecond*200, 10*time.Second, false, func(ctx context.Context) (bool, error) {
 		for _, pod := range tt.incomingPods {
-			if !podScheduled(cs, pod.Namespace, pod.Name) {
+			if !podScheduled(t, cs, pod.Namespace, pod.Name) {
 				return false, nil
 			}
 		}
@@ -351,7 +350,7 @@ func TestElasticController(t *testing.T) {
 		var eq schedv1alpha1.ElasticQuota
 		if err := extClient.Get(ctx, types.NamespacedName{Namespace: v.Namespace, Name: v.Name}, &eq); err != nil {
 			// This could be a connection error so we want to retry.
-			klog.ErrorS(err, "Failed to obtain the elasticQuota clientSet")
+			t.Errorf("Failed to obtain the elasticQuota clientSet: %s", err)
 			return false, err
 		}
 		if !quota.Equals(eq.Status.Used, v.Status.Used) {
```

test/integration/loadVariationRiskBalancing_test.go

Lines changed: 1 addition & 1 deletion
```diff
@@ -184,7 +184,7 @@ func TestLoadVariationRiskBalancingPlugin(t *testing.T) {
 	expected := [2]string{"node-1", "node-1"}
 	for i := range newPods {
 		err := wait.PollUntilContextTimeout(testCtx.Ctx, 1*time.Second, 10*time.Second, false, func(ctx context.Context) (bool, error) {
-			return podScheduled(cs, newPods[i].Namespace, newPods[i].Name), nil
+			return podScheduled(t, cs, newPods[i].Namespace, newPods[i].Name), nil
 		})
 		assert.Nil(t, err)
 
```