Skip to content

Commit cbf9da4

Browse files
committed
nrt: filter: enable coalescing of log filter reasons
When the nodetopologymatch filter rejects a node, we see a reason like ``` I0314 09:02:43.860984 1 scheduler.go:351] "Unable to schedule pod; no fit; waiting" pod="lns14n245p-a39b5b35-1878-49a5-ae75-0e39f30eb0ef/g-node-density-1340" err="0/17 nodes are available: 1 invalid node topology data for node $NODE1, 1 invalid node topology data for node $NODE2, 1 invalid node topology data for node $NODE3, 1 invalid node topology data for node $NODE4, 10 Too many pods, 3 node(s) didn't match Pod's node affinity/selector, 3 node(s) had untolerated taint {node-role.kubernetes.io/master: }. preemption: 0/17 nodes are available: 14 No preemption victims found for incoming pod, 3 Preemption is not helpful for scheduling." ``` This wants to be helpful, but it doesn't scale past a handful of nodes, and it clutters the message, potentially hiding useful information. The message should include a summary instead, to read like: ``` I0314 09:02:43.860984 1 scheduler.go:351] "Unable to schedule pod; no fit; waiting" pod="lns14n245p-a39b5b35-1878-49a5-ae75-0e39f30eb0ef/g-node-density-1340" err="0/17 nodes are available: 4 invalid node topology data, 10 Too many pods, 3 node(s) didn't match Pod's node affinity/selector, 3 node(s) had untolerated taint {node-role.kubernetes.io/master: }. preemption: 0/17 nodes are available: 14 No preemption victims found for incoming pod, 3 Preemption is not helpful for scheduling." ``` We should then find other means (logs? metrics?) to zoom in and understand the detailed reject reasons; they just don't belong here. The first step toward making it possible to coalesce the filter reasons is to use uniform, constant messages when the filter decides a node is Unschedulable. We move the more detailed reject reason to the logs, lacking better alternatives for now. Signed-off-by: Francesco Romani <[email protected]>
1 parent 726759a commit cbf9da4

File tree

3 files changed

+48
-44
lines changed

3 files changed

+48
-44
lines changed

pkg/noderesourcetopology/filter.go

Lines changed: 8 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -60,7 +60,8 @@ func singleNUMAContainerLevelHandler(pod *v1.Pod, zones topologyv1alpha2.ZoneLis
6060
_, match := resourcesAvailableInAnyNUMANodes(logID, nodes, initContainer.Resources.Requests, qos, nodeInfo)
6161
if !match {
6262
// we can't align init container, so definitely we can't align a pod
63-
return framework.NewStatus(framework.Unschedulable, fmt.Sprintf("cannot align init container: %s", initContainer.Name))
63+
klog.V(2).InfoS("cannot align container", "name", initContainer.Name, "kind", "init")
64+
return framework.NewStatus(framework.Unschedulable, "cannot align init container")
6465
}
6566
}
6667

@@ -71,7 +72,8 @@ func singleNUMAContainerLevelHandler(pod *v1.Pod, zones topologyv1alpha2.ZoneLis
7172
numaID, match := resourcesAvailableInAnyNUMANodes(logID, nodes, container.Resources.Requests, qos, nodeInfo)
7273
if !match {
7374
// we can't align container, so definitely we can't align a pod
74-
return framework.NewStatus(framework.Unschedulable, fmt.Sprintf("cannot align container: %s", container.Name))
75+
klog.V(2).InfoS("cannot align container", "name", container.Name, "kind", "app")
76+
return framework.NewStatus(framework.Unschedulable, "cannot align container")
7577
}
7678

7779
// subtract the resources requested by the container from the given NUMA.
@@ -183,7 +185,8 @@ func singleNUMAPodLevelHandler(pod *v1.Pod, zones topologyv1alpha2.ZoneList, nod
183185
klog.V(6).InfoS("target resources", stringify.ResourceListToLoggable(logID, resources)...)
184186

185187
if _, match := resourcesAvailableInAnyNUMANodes(logID, createNUMANodeList(zones), resources, v1qos.GetPodQOS(pod), nodeInfo); !match {
186-
return framework.NewStatus(framework.Unschedulable, fmt.Sprintf("cannot align pod: %s", pod.Name))
188+
klog.V(2).InfoS("cannot align pod", "name", pod.Name)
189+
return framework.NewStatus(framework.Unschedulable, "cannot align pod")
187190
}
188191
return nil
189192
}
@@ -200,7 +203,8 @@ func (tm *TopologyMatch) Filter(ctx context.Context, cycleState *framework.Cycle
200203
nodeName := nodeInfo.Node().Name
201204
nodeTopology, ok := tm.nrtCache.GetCachedNRTCopy(nodeName, pod)
202205
if !ok {
203-
return framework.NewStatus(framework.Unschedulable, fmt.Sprintf("invalid node topology data for node %s", nodeName))
206+
klog.V(2).InfoS("invalid topology data", "node", nodeName)
207+
return framework.NewStatus(framework.Unschedulable, "invalid node topology data")
204208
}
205209
if nodeTopology == nil {
206210
return nil

pkg/noderesourcetopology/filter_test.go

Lines changed: 27 additions & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -254,7 +254,7 @@ func TestNodeResourceTopology(t *testing.T) {
254254
pod: makePodByResourceList(&v1.ResourceList{
255255
nicResourceName: *resource.NewQuantity(20, resource.DecimalSI)}),
256256
node: nodes[2],
257-
wantStatus: framework.NewStatus(framework.Unschedulable, "cannot align pod: "),
257+
wantStatus: framework.NewStatus(framework.Unschedulable, "cannot align pod"),
258258
},
259259
{
260260
name: "Best effort QoS requesting devices, Container Scope Topology policy; pod fit",
@@ -268,7 +268,7 @@ func TestNodeResourceTopology(t *testing.T) {
268268
pod: makePodByResourceList(&v1.ResourceList{
269269
nicResourceName: *resource.NewQuantity(20, resource.DecimalSI)}),
270270
node: nodes[0],
271-
wantStatus: framework.NewStatus(framework.Unschedulable, fmt.Sprintf("cannot align container: %s", containerName)),
271+
wantStatus: framework.NewStatus(framework.Unschedulable, "cannot align container"),
272272
},
273273
{
274274
name: "Best effort QoS requesting devices and extended resources, Container Scope Topology policy; pod doesn't fit",
@@ -291,7 +291,7 @@ func TestNodeResourceTopology(t *testing.T) {
291291
nicResourceName: *resource.NewQuantity(11, resource.DecimalSI)},
292292
),
293293
node: nodes[1],
294-
wantStatus: framework.NewStatus(framework.Unschedulable, fmt.Sprintf("cannot align container: %s", containerName)),
294+
wantStatus: framework.NewStatus(framework.Unschedulable, "cannot align container"),
295295
},
296296
{
297297
name: "Best effort QoS, requesting CPU, memory (enough on NUMA) and devices (not enough), Pod Scope Topology policy; pod doesn't fit",
@@ -306,7 +306,7 @@ func TestNodeResourceTopology(t *testing.T) {
306306
nicResourceName: *resource.NewQuantity(6, resource.DecimalSI)},
307307
),
308308
node: nodes[2],
309-
wantStatus: framework.NewStatus(framework.Unschedulable, "cannot align pod: "),
309+
wantStatus: framework.NewStatus(framework.Unschedulable, "cannot align pod"),
310310
},
311311
{
312312
name: "Best effort QoS requesting CPU, memory (enough on NUMA) and devices, Pod Scope Topology policy; pod fit",
@@ -427,15 +427,15 @@ func TestNodeResourceTopology(t *testing.T) {
427427
v1.ResourceCPU: *resource.NewQuantity(4, resource.DecimalSI),
428428
nicResourceName: *resource.NewQuantity(11, resource.DecimalSI)}),
429429
node: nodes[1],
430-
wantStatus: framework.NewStatus(framework.Unschedulable, fmt.Sprintf("cannot align container: %s", containerName)),
430+
wantStatus: framework.NewStatus(framework.Unschedulable, "cannot align container"),
431431
},
432432
{
433433
name: "Burstable QoS, requesting CPU and devices (not enough), Pod Scope Topology policy; pod doesn't fit",
434434
pod: makePodByResourceList(&v1.ResourceList{
435435
v1.ResourceCPU: *resource.NewQuantity(2, resource.DecimalSI),
436436
nicResourceName: *resource.NewQuantity(6, resource.DecimalSI)}),
437437
node: nodes[2],
438-
wantStatus: framework.NewStatus(framework.Unschedulable, "cannot align pod: "),
438+
wantStatus: framework.NewStatus(framework.Unschedulable, "cannot align pod"),
439439
},
440440
{
441441
name: "Burstable QoS requesting CPU (enough on NUMA) and devices, Pod Scope Topology policy; pod fit",
@@ -475,15 +475,15 @@ func TestNodeResourceTopology(t *testing.T) {
475475
v1.ResourceMemory: resource.MustParse("2Gi"),
476476
nicResourceName: *resource.NewQuantity(11, resource.DecimalSI)}),
477477
node: nodes[1],
478-
wantStatus: framework.NewStatus(framework.Unschedulable, fmt.Sprintf("cannot align container: %s", containerName)),
478+
wantStatus: framework.NewStatus(framework.Unschedulable, "cannot align container"),
479479
},
480480
{
481481
name: "Burstable QoS, requesting memory (enough on NUMA) and devices (not enough), Pod Scope Topology policy; pod doesn't fit",
482482
pod: makePodByResourceList(&v1.ResourceList{
483483
v1.ResourceMemory: resource.MustParse("2Gi"),
484484
nicResourceName: *resource.NewQuantity(6, resource.DecimalSI)}),
485485
node: nodes[2],
486-
wantStatus: framework.NewStatus(framework.Unschedulable, "cannot align pod: "),
486+
wantStatus: framework.NewStatus(framework.Unschedulable, "cannot align pod"),
487487
},
488488
{
489489
name: "Burstable QoS requesting memory (enough on NUMA) and devices, Pod Scope Topology policy; pod fit",
@@ -524,7 +524,7 @@ func TestNodeResourceTopology(t *testing.T) {
524524
v1.ResourceMemory: resource.MustParse("4Gi"),
525525
nicResourceName: *resource.NewQuantity(11, resource.DecimalSI)}),
526526
node: nodes[1],
527-
wantStatus: framework.NewStatus(framework.Unschedulable, fmt.Sprintf("cannot align container: %s", containerName)),
527+
wantStatus: framework.NewStatus(framework.Unschedulable, "cannot align container"),
528528
},
529529
{
530530
name: "Burstable QoS, requesting CPU, memory (enough on NUMA) and devices (not enough), Pod Scope Topology policy; pod doesn't fit",
@@ -533,7 +533,7 @@ func TestNodeResourceTopology(t *testing.T) {
533533
v1.ResourceMemory: resource.MustParse("2Gi"),
534534
nicResourceName: *resource.NewQuantity(6, resource.DecimalSI)}),
535535
node: nodes[2],
536-
wantStatus: framework.NewStatus(framework.Unschedulable, "cannot align pod: "),
536+
wantStatus: framework.NewStatus(framework.Unschedulable, "cannot align pod"),
537537
},
538538
{
539539
name: "Burstable QoS requesting CPU, memory (enough on NUMA) and devices, Pod Scope Topology policy; pod fit",
@@ -588,7 +588,7 @@ func TestNodeResourceTopology(t *testing.T) {
588588
hugepages2Mi: resource.MustParse("256Mi"),
589589
nicResourceName: *resource.NewQuantity(3, resource.DecimalSI)}),
590590
node: nodes[1],
591-
wantStatus: framework.NewStatus(framework.Unschedulable, fmt.Sprintf("cannot align container: %s", containerName)),
591+
wantStatus: framework.NewStatus(framework.Unschedulable, "cannot align container"),
592592
},
593593
{
594594
name: "Guaranteed QoS, pod doesn't fit",
@@ -597,7 +597,7 @@ func TestNodeResourceTopology(t *testing.T) {
597597
v1.ResourceMemory: resource.MustParse("1Gi"),
598598
nicResourceName: *resource.NewQuantity(3, resource.DecimalSI)}),
599599
node: nodes[0],
600-
wantStatus: framework.NewStatus(framework.Unschedulable, fmt.Sprintf("cannot align container: %s", containerName)),
600+
wantStatus: framework.NewStatus(framework.Unschedulable, "cannot align container"),
601601
},
602602
{
603603
name: "Guaranteed QoS, pod fit",
@@ -615,7 +615,7 @@ func TestNodeResourceTopology(t *testing.T) {
615615
v1.ResourceMemory: resource.MustParse("1Gi"),
616616
notExistingNICResourceName: *resource.NewQuantity(0, resource.DecimalSI)}, 3),
617617
node: nodes[2],
618-
wantStatus: framework.NewStatus(framework.Unschedulable, "cannot align pod: "),
618+
wantStatus: framework.NewStatus(framework.Unschedulable, "cannot align pod"),
619619
},
620620
{
621621
name: "Guaranteed QoS Topology Scope, minimal, pod fit",
@@ -649,7 +649,7 @@ func TestNodeResourceTopology(t *testing.T) {
649649
v1.ResourceMemory: resource.MustParse("1Gi"),
650650
notExistingNICResourceName: *resource.NewQuantity(0, resource.DecimalSI)}, 3),
651651
node: nodes[3],
652-
wantStatus: framework.NewStatus(framework.Unschedulable, "cannot align pod: "),
652+
wantStatus: framework.NewStatus(framework.Unschedulable, "cannot align pod"),
653653
},
654654
{
655655
name: "Guaranteed QoS, hugepages, non-NUMA affine NIC, pod fit",
@@ -791,7 +791,7 @@ func TestNodeResourceTopologyMultiContainerPodScope(t *testing.T) {
791791
nodeTopologies[0],
792792
},
793793
avail: []resourceDescriptor{},
794-
wantStatus: framework.NewStatus(framework.Unschedulable, "cannot align pod: testpod"),
794+
wantStatus: framework.NewStatus(framework.Unschedulable, "cannot align pod"),
795795
},
796796
{
797797
name: "gu pod does not fit - not enough memory available on any NUMA node",
@@ -818,7 +818,7 @@ func TestNodeResourceTopologyMultiContainerPodScope(t *testing.T) {
818818
nodeTopologies[0],
819819
},
820820
avail: []resourceDescriptor{},
821-
wantStatus: framework.NewStatus(framework.Unschedulable, "cannot align pod: testpod"),
821+
wantStatus: framework.NewStatus(framework.Unschedulable, "cannot align pod"),
822822
},
823823
{
824824
name: "gu pod does not fit - not enough Hugepages available on any NUMA node",
@@ -845,7 +845,7 @@ func TestNodeResourceTopologyMultiContainerPodScope(t *testing.T) {
845845
nodeTopologies[0],
846846
},
847847
avail: []resourceDescriptor{},
848-
wantStatus: framework.NewStatus(framework.Unschedulable, "cannot align pod: testpod"),
848+
wantStatus: framework.NewStatus(framework.Unschedulable, "cannot align pod"),
849849
},
850850
{
851851
name: "gu pod does not fit - not enough devices available on any NUMA node",
@@ -872,7 +872,7 @@ func TestNodeResourceTopologyMultiContainerPodScope(t *testing.T) {
872872
nodeTopologies[0],
873873
},
874874
avail: []resourceDescriptor{},
875-
wantStatus: framework.NewStatus(framework.Unschedulable, "cannot align pod: testpod"),
875+
wantStatus: framework.NewStatus(framework.Unschedulable, "cannot align pod"),
876876
},
877877
}
878878

@@ -967,21 +967,21 @@ func TestNodeResourceTopologyMultiContainerContainerScope(t *testing.T) {
967967
cntReq: []map[string]string{
968968
{cpu: "40", memory: "4G"},
969969
},
970-
statusErr: "cannot align container: cnt-1",
970+
statusErr: "cannot align container", // cnt-1
971971
},
972972
{
973973
description: "[2][tier3] single container with memory over allocation - fit",
974974
cntReq: []map[string]string{
975975
{cpu: "2", memory: "100G"},
976976
},
977-
statusErr: "cannot align container: cnt-1",
977+
statusErr: "cannot align container", // cnt-1
978978
},
979979
{
980980
description: "[2][tier3] single container with cpu and memory over allocation - fit",
981981
cntReq: []map[string]string{
982982
{cpu: "40", memory: "100G"},
983983
},
984-
statusErr: "cannot align container: cnt-1",
984+
statusErr: "cannot align container", // cnt-1
985985
},
986986
{
987987
description: "[4][tier2] multi-containers with good allocation, spread across NUMAs - fit",
@@ -1006,7 +1006,7 @@ func TestNodeResourceTopologyMultiContainerContainerScope(t *testing.T) {
10061006
{cpu: "1", memory: "4G"},
10071007
{cpu: "1", memory: "4G"},
10081008
},
1009-
statusErr: "cannot align init container: cnt-1",
1009+
statusErr: "cannot align init container", // cnt-1
10101010
},
10111011
{
10121012
description: "[7][tier1] init container with memory over allocation, multi-containers with good allocation - not fit",
@@ -1017,7 +1017,7 @@ func TestNodeResourceTopologyMultiContainerContainerScope(t *testing.T) {
10171017
{cpu: "1", memory: "4G"},
10181018
{cpu: "1", memory: "4G"},
10191019
},
1020-
statusErr: "cannot align init container: cnt-1",
1020+
statusErr: "cannot align init container", // cnt-1
10211021
},
10221022
{
10231023
description: "[11][tier1] init container with good allocation, multi-containers spread across NUMAs - fit",
@@ -1053,7 +1053,7 @@ func TestNodeResourceTopologyMultiContainerContainerScope(t *testing.T) {
10531053
{cpu: "20", memory: "40G"},
10541054
{cpu: "20", memory: "6G"},
10551055
},
1056-
statusErr: "cannot align container: cnt-3",
1056+
statusErr: "cannot align container", // cnt-3
10571057
},
10581058
{
10591059
description: "[27][tier1] multi init containers with good allocation, container with cpu over allocation - not fit",
@@ -1064,7 +1064,7 @@ func TestNodeResourceTopologyMultiContainerContainerScope(t *testing.T) {
10641064
cntReq: []map[string]string{
10651065
{cpu: "35", memory: "40G"},
10661066
},
1067-
statusErr: "cannot align container: cnt-1",
1067+
statusErr: "cannot align container", // cnt-1
10681068
},
10691069
{
10701070
description: "[28][tier1] multi init containers with good allocation, multi-containers with good allocation - fit",
@@ -1114,7 +1114,7 @@ func TestNodeResourceTopologyMultiContainerContainerScope(t *testing.T) {
11141114
{cpu: "20", memory: "40G"},
11151115
{cpu: "2", memory: "6G"},
11161116
},
1117-
statusErr: "cannot align init container: cnt-1",
1117+
statusErr: "cannot align init container", // cnt-1
11181118
},
11191119
{
11201120
description: "[32][tier1] multi init containers with over memory allocation - not fit",
@@ -1127,7 +1127,7 @@ func TestNodeResourceTopologyMultiContainerContainerScope(t *testing.T) {
11271127
{cpu: "20", memory: "40G"},
11281128
{cpu: "2", memory: "6G"},
11291129
},
1130-
statusErr: "cannot align init container: cnt-2",
1130+
statusErr: "cannot align init container", // cnt-2
11311131
},
11321132
}
11331133

test/integration/noderesourcetopology_test.go

Lines changed: 13 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -1004,15 +1004,15 @@ func TestTopologyMatchPlugin(t *testing.T) {
10041004
{cpu: "2", memory: "6Gi", hugepages2Mi: "400Mi"},
10051005
{cpu: "2", memory: "6Gi", hugepages2Mi: "400Mi"},
10061006
},
1007-
errMsg: "cannot align container: cnt-2",
1007+
errMsg: "cannot align container", // cnt-2
10081008
},
10091009
{
10101010
description: "[5][tier1] multi containers with device over allocation, spread across NUMAs - not fit",
10111011
cntReq: []map[string]string{
10121012
{cpu: "2", memory: "6Gi", hugepages2Mi: "50Mi", nicResourceName: "20"},
10131013
{cpu: "2", memory: "6Gi", hugepages2Mi: "500Mi", nicResourceName: "20"},
10141014
},
1015-
errMsg: "cannot align container: cnt-2",
1015+
errMsg: "cannot align container", // cnt-2
10161016
},
10171017
{
10181018
description: "[7][tier1] init container with cpu over allocation, multi-containers with good allocation - not fit",
@@ -1023,7 +1023,7 @@ func TestTopologyMatchPlugin(t *testing.T) {
10231023
{cpu: "1", memory: "4Gi"},
10241024
{cpu: "1", memory: "4Gi"},
10251025
},
1026-
errMsg: "cannot align init container: initcnt-1",
1026+
errMsg: "cannot align init container", // initcnt-1
10271027
},
10281028
{
10291029
description: "[7][tier1] init container with memory over allocation, multi-containers with good allocation - not fit",
@@ -1034,7 +1034,7 @@ func TestTopologyMatchPlugin(t *testing.T) {
10341034
{cpu: "1", memory: "4Gi"},
10351035
{cpu: "1", memory: "4Gi"},
10361036
},
1037-
errMsg: "cannot align init container: initcnt-1",
1037+
errMsg: "cannot align init container", // initcnt-1
10381038
},
10391039
{
10401040
description: "[11][tier1] init container with good allocation, multi-containers spread across NUMAs - fit",
@@ -1056,7 +1056,7 @@ func TestTopologyMatchPlugin(t *testing.T) {
10561056
{cpu: "20", memory: "40Gi"},
10571057
{cpu: "20", memory: "10Gi"},
10581058
},
1059-
errMsg: "cannot align container: cnt-3",
1059+
errMsg: "cannot align container", // cnt-3
10601060
},
10611061
{
10621062
description: "[12][tier1] init container with good allocation, multi-containers with memory over allocation - not fit",
@@ -1068,7 +1068,7 @@ func TestTopologyMatchPlugin(t *testing.T) {
10681068
{cpu: "20", memory: "40Gi"},
10691069
{cpu: "2", memory: "40Gi"},
10701070
},
1071-
errMsg: "cannot align container: cnt-3",
1071+
errMsg: "cannot align container", // cnt-3
10721072
},
10731073
{
10741074
description: "[17][tier1] multi init containers with good allocation, multi-containers spread across NUMAs - fit",
@@ -1095,7 +1095,7 @@ func TestTopologyMatchPlugin(t *testing.T) {
10951095
{cpu: "20", memory: "40Gi"},
10961096
{cpu: "20", memory: "10Gi"},
10971097
},
1098-
errMsg: "cannot align container: cnt-3",
1098+
errMsg: "cannot align container", // cnt-3
10991099
},
11001100
{
11011101
description: "[18][tier1] multi init containers with good allocation, multi-containers with memory over allocation - not fit",
@@ -1109,7 +1109,7 @@ func TestTopologyMatchPlugin(t *testing.T) {
11091109
{cpu: "20", memory: "35Gi"},
11101110
{cpu: "2", memory: "50Gi"},
11111111
},
1112-
errMsg: "cannot align container: cnt-3",
1112+
errMsg: "cannot align container", // cnt-3
11131113
},
11141114
{
11151115
description: "[24][tier1] multi init containers with good allocation, multi-containers with cpu over allocation - not fit",
@@ -1122,7 +1122,7 @@ func TestTopologyMatchPlugin(t *testing.T) {
11221122
{cpu: "20", memory: "40Gi"},
11231123
{cpu: "20", memory: "6Gi"},
11241124
},
1125-
errMsg: "cannot align container: cnt-3",
1125+
errMsg: "cannot align container", // cnt-3
11261126
},
11271127
{
11281128
description: "[24][tier1] multi init containers with good allocation, multi-containers with memory over allocation - not fit",
@@ -1135,7 +1135,7 @@ func TestTopologyMatchPlugin(t *testing.T) {
11351135
{cpu: "20", memory: "35Gi"},
11361136
{cpu: "2", memory: "50Gi"},
11371137
},
1138-
errMsg: "cannot align container: cnt-3",
1138+
errMsg: "cannot align container", // cnt-3
11391139
},
11401140
{
11411141
description: "[27][tier1] multi init containers with good allocation, container with cpu over allocation - not fit",
@@ -1146,7 +1146,7 @@ func TestTopologyMatchPlugin(t *testing.T) {
11461146
cntReq: []map[string]string{
11471147
{cpu: "35", memory: "40Gi"},
11481148
},
1149-
errMsg: "cannot align container: cnt-1",
1149+
errMsg: "cannot align container", // cnt-1
11501150
},
11511151
{
11521152
description: "[28][tier1] multi init containers with good allocation, multi-containers with good allocation - fit",
@@ -1196,7 +1196,7 @@ func TestTopologyMatchPlugin(t *testing.T) {
11961196
{cpu: "20", memory: "40Gi"},
11971197
{cpu: "2", memory: "6Gi"},
11981198
},
1199-
errMsg: "cannot align init container: initcnt-1",
1199+
errMsg: "cannot align init container", // initcnt-1
12001200
},
12011201
{
12021202
description: "[32][tier1] multi init containers with over memory allocation - not fit",
@@ -1209,7 +1209,7 @@ func TestTopologyMatchPlugin(t *testing.T) {
12091209
{cpu: "20", memory: "40Gi"},
12101210
{cpu: "2", memory: "6Gi"},
12111211
},
1212-
errMsg: "cannot align init container: initcnt-2",
1212+
errMsg: "cannot align init container", // initcnt-2
12131213
},
12141214
}
12151215
tests = append(tests, parseTestUserEntry(scopeEqualsContainerTests, ns)...)

0 commit comments

Comments
 (0)