Commit 84a29f3 (parent: 527cec2)

Add integration tests for DiscardReservedNodes NRT cache

Signed-off-by: pprokop <[email protected]>

1 file changed: test/integration/noderesourcetopology_cache_test.go (+165 lines, -7 lines)
@@ -35,6 +35,8 @@ import (
     "k8s.io/klog/v2"
     "k8s.io/kubernetes/pkg/scheduler"
     schedapi "k8s.io/kubernetes/pkg/scheduler/apis/config"
+    "k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultbinder"
+    "k8s.io/kubernetes/pkg/scheduler/framework/plugins/queuesort"
     fwkruntime "k8s.io/kubernetes/pkg/scheduler/framework/runtime"
     st "k8s.io/kubernetes/pkg/scheduler/testing"

@@ -50,24 +52,26 @@ import (
 const (
     defaultCacheResyncPeriodSeconds int64 = 5
     anyNode                               = "*"
+    discardReservedSchedulerName          = "discardReserved"
 )

 var (
     schedVerbose = "0"
 )

 type podDesc struct {
-    podName      string
-    isGuaranteed bool
-    isDelete     bool
-    resourcesMap map[string]string
-    expectedNode string
+    schedulerName string
+    podName       string
+    isGuaranteed  bool
+    isDelete      bool
+    resourcesMap  map[string]string
+    expectedNode  string
     // autogenerated
     pod *corev1.Pod
 }

 func (p *podDesc) SetupPod(ns string, initContainer bool) {
-    pt := st.MakePod().Namespace(ns).Name(p.podName)
+    pt := st.MakePod().Namespace(ns).Name(p.podName).SchedulerName(p.schedulerName)
     if p.isGuaranteed {
         p.pod = util.WithLimits(pt, p.resourcesMap, initContainer).Obj()
     } else {
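For illustration only (not part of the commit): a minimal sketch of the pod spec that the new schedulerName field ends up producing once SetupPod chains .SchedulerName(p.schedulerName). The helper name newDiscardReservedPod, the container name, and the pause image are assumptions; the 16 CPU / 24Gi figures simply mirror the test cases added below.

package nrttest // illustrative sketch only; not part of the commit

import (
    corev1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/resource"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// newDiscardReservedPod builds a Guaranteed pod bound to the "discardReserved"
// scheduler profile, roughly what podDesc.SetupPod produces via
// st.MakePod().Namespace(ns).Name(name).SchedulerName("discardReserved").
func newDiscardReservedPod(ns, name string) *corev1.Pod {
    res := corev1.ResourceList{
        corev1.ResourceCPU:    resource.MustParse("16"),
        corev1.ResourceMemory: resource.MustParse("24Gi"),
    }
    return &corev1.Pod{
        ObjectMeta: metav1.ObjectMeta{Namespace: ns, Name: name},
        Spec: corev1.PodSpec{
            // Selecting the profile by name is what routes the pod to the
            // NodeResourceTopologyMatch plugin configured with DiscardReservedNodes.
            SchedulerName: "discardReserved",
            Containers: []corev1.Container{{
                Name:  "cnt",
                Image: "registry.k8s.io/pause:3.9",
                // requests == limits for every container => Guaranteed QoS,
                // matching the isGuaranteed: true test cases.
                Resources: corev1.ResourceRequirements{Requests: res, Limits: res},
            }},
        },
    }
}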
@@ -264,6 +268,109 @@ func TestTopologyCachePluginWithoutUpdates(t *testing.T) {
                         }).Obj(),
             },
         },
+        {
+            name: "GU pod: DiscardReservedNodes: allows scheduling on both Zones",
+            podDescs: []podDesc{
+                {
+                    podName:      "nrt-pod-1000",
+                    isGuaranteed: true,
+                    resourcesMap: map[string]string{
+                        string(corev1.ResourceCPU):    "16",
+                        string(corev1.ResourceMemory): "24Gi",
+                    },
+                    expectedNode:  "fake-node-cache-1",
+                    schedulerName: discardReservedSchedulerName,
+                },
+                {
+                    podName:      "nrt-pod-2000",
+                    isGuaranteed: true,
+                    resourcesMap: map[string]string{
+                        string(corev1.ResourceCPU):    "16",
+                        string(corev1.ResourceMemory): "24Gi",
+                    },
+                    schedulerName: discardReservedSchedulerName,
+                    expectedNode:  "fake-node-cache-1",
+                },
+            },
+            nodeResourceTopologies: []*topologyv1alpha2.NodeResourceTopology{
+                MakeNRT().Name("fake-node-cache-1").Policy(topologyv1alpha2.SingleNUMANodeContainerLevel).
+                    Zone(
+                        topologyv1alpha2.ResourceInfoList{
+                            noderesourcetopology.MakeTopologyResInfo(cpu, "32", "30"),
+                            noderesourcetopology.MakeTopologyResInfo(memory, "64Gi", "60Gi"),
+                        }).
+                    Zone(
+                        topologyv1alpha2.ResourceInfoList{
+                            noderesourcetopology.MakeTopologyResInfo(cpu, "32", "30"),
+                            noderesourcetopology.MakeTopologyResInfo(memory, "64Gi", "62Gi"),
+                        }).Obj(),
+                MakeNRT().Name("fake-node-cache-2").Policy(topologyv1alpha2.SingleNUMANodeContainerLevel).
+                    Zone(
+                        topologyv1alpha2.ResourceInfoList{
+                            noderesourcetopology.MakeTopologyResInfo(cpu, "32", "10"),
+                            noderesourcetopology.MakeTopologyResInfo(memory, "64Gi", "14Gi"),
+                        }).
+                    Zone(
+                        topologyv1alpha2.ResourceInfoList{
+                            noderesourcetopology.MakeTopologyResInfo(cpu, "32", "8"),
+                            noderesourcetopology.MakeTopologyResInfo(memory, "64Gi", "10Gi"),
+                        }).Obj(),
+            },
+        },
+        {
+            name: "GU pod: DiscardReservedNodes: new pod is successfully scheduled on the node, after deleting pod consuming most resources",
+            podDescs: []podDesc{
+                {
+                    podName:      "nrt-pod-3000",
+                    isGuaranteed: true,
+                    resourcesMap: map[string]string{
+                        string(corev1.ResourceCPU):    "30",
+                        string(corev1.ResourceMemory): "60Gi",
+                    },
+                    schedulerName: discardReservedSchedulerName,
+                    expectedNode:  "fake-node-cache-1",
+                },
+                {
+                    podName:       "nrt-pod-3000",
+                    isDelete:      true,
+                    schedulerName: discardReservedSchedulerName,
+                },
+                {
+                    podName:      "nrt-pod-4000",
+                    isGuaranteed: true,
+                    resourcesMap: map[string]string{
+                        string(corev1.ResourceCPU):    "16",
+                        string(corev1.ResourceMemory): "24Gi",
+                    },
+                    schedulerName: discardReservedSchedulerName,
+                    expectedNode:  "fake-node-cache-1",
+                },
+            },
+            nodeResourceTopologies: []*topologyv1alpha2.NodeResourceTopology{
+                MakeNRT().Name("fake-node-cache-1").Policy(topologyv1alpha2.SingleNUMANodeContainerLevel).
+                    Zone(
+                        topologyv1alpha2.ResourceInfoList{
+                            noderesourcetopology.MakeTopologyResInfo(cpu, "32", "30"),
+                            noderesourcetopology.MakeTopologyResInfo(memory, "64Gi", "60Gi"),
+                        }).
+                    Zone(
+                        topologyv1alpha2.ResourceInfoList{
+                            noderesourcetopology.MakeTopologyResInfo(cpu, "32", "30"),
+                            noderesourcetopology.MakeTopologyResInfo(memory, "64Gi", "62Gi"),
+                        }).Obj(),
+                MakeNRT().Name("fake-node-cache-2").Policy(topologyv1alpha2.SingleNUMANodeContainerLevel).
+                    Zone(
+                        topologyv1alpha2.ResourceInfoList{
+                            noderesourcetopology.MakeTopologyResInfo(cpu, "32", "10"),
+                            noderesourcetopology.MakeTopologyResInfo(memory, "64Gi", "14Gi"),
+                        }).
+                    Zone(
+                        topologyv1alpha2.ResourceInfoList{
+                            noderesourcetopology.MakeTopologyResInfo(cpu, "32", "8"),
+                            noderesourcetopology.MakeTopologyResInfo(memory, "64Gi", "10Gi"),
+                        }).Obj(),
+            },
+        },
     } {
         t.Run(tt.name, func(t *testing.T) {
             // because caching, each testcase needs to run from a clean slate
@@ -297,6 +404,8 @@ func TestTopologyCachePluginWithoutUpdates(t *testing.T) {
         },
     })

+    cfg.Profiles = append(cfg.Profiles, discardReservedSchedulerProfile())
+
     defer func() {
         cleanupTest(t, testCtx)
         klog.Infof("test environment cleaned up")
@@ -356,7 +465,7 @@ func TestTopologyCachePluginWithoutUpdates(t *testing.T) {
                 t.Fatalf("Failed to delete Pod %q: %v", p.podName, err)
             }
         } else {
-            klog.Infof("Creating Pod %q", p.pod.Name)
+            klog.Infof("Creating Pod %q: scheduler: %q", p.pod.Name, p.pod.Spec.SchedulerName)
             _, err := cs.CoreV1().Pods(ns).Create(testCtx.Ctx, p.pod, metav1.CreateOptions{})
             if err != nil {
                 t.Fatalf("Failed to create Pod %q: %v", p.pod.Name, err)
@@ -689,3 +798,52 @@ func mkPFP(nodeName string, pods ...*corev1.Pod) string {
     klog.Infof("PFP for %q: %s", nodeName, st.Repr())
     return pfp
 }
+
+func discardReservedSchedulerProfile() schedapi.KubeSchedulerProfile {
+    nodeLockingMatchArgs := schedconfig.NodeResourceTopologyMatchArgs{
+        ScoringStrategy:      schedconfig.ScoringStrategy{Type: schedconfig.LeastAllocated},
+        DiscardReservedNodes: true,
+    }
+
+    return schedapi.KubeSchedulerProfile{
+        SchedulerName: discardReservedSchedulerName,
+        Plugins: &schedapi.Plugins{
+            QueueSort: schedapi.PluginSet{
+                Enabled: []schedapi.Plugin{
+                    {Name: queuesort.Name},
+                },
+            },
+            Filter: schedapi.PluginSet{
+                Enabled: []schedapi.Plugin{
+                    {Name: noderesourcetopology.Name},
+                },
+            },
+            Score: schedapi.PluginSet{
+                Enabled: []schedapi.Plugin{
+                    {Name: noderesourcetopology.Name},
+                },
+            },
+            Reserve: schedapi.PluginSet{
+                Enabled: []schedapi.Plugin{
+                    {Name: noderesourcetopology.Name},
+                },
+            },
+            PostBind: schedapi.PluginSet{
+                Enabled: []schedapi.Plugin{
+                    {Name: noderesourcetopology.Name},
+                },
+            },
+            Bind: schedapi.PluginSet{
+                Enabled: []schedapi.Plugin{
+                    {Name: defaultbinder.Name},
+                },
+            },
+        },
+        PluginConfig: []schedapi.PluginConfig{
+            {
+                Name: noderesourcetopology.Name,
+                Args: &nodeLockingMatchArgs,
+            },
+        },
+    }
+}
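
For context only (not part of the commit): a small unit-style sketch of how the profile above could be sanity-checked. It assumes it sits in the same test package, so discardReservedSchedulerProfile, discardReservedSchedulerName, and the file's existing imports (testing, schedapi, schedconfig) are in scope; the schedconfig alias is assumed to resolve to the scheduler-plugins apis/config package.

// TestDiscardReservedProfileArgs is an illustrative sketch, not part of the
// commit: it checks that the profile enables DiscardReservedNodes on the
// NodeResourceTopologyMatch plugin config and keeps the LeastAllocated
// scoring strategy.
func TestDiscardReservedProfileArgs(t *testing.T) {
    profile := discardReservedSchedulerProfile()

    if profile.SchedulerName != discardReservedSchedulerName {
        t.Fatalf("unexpected scheduler name: %q", profile.SchedulerName)
    }

    args, ok := profile.PluginConfig[0].Args.(*schedconfig.NodeResourceTopologyMatchArgs)
    if !ok {
        t.Fatalf("unexpected plugin args type: %T", profile.PluginConfig[0].Args)
    }
    if !args.DiscardReservedNodes {
        t.Fatal("DiscardReservedNodes should be enabled in this profile")
    }
    if args.ScoringStrategy.Type != schedconfig.LeastAllocated {
        t.Fatalf("unexpected scoring strategy: %v", args.ScoringStrategy.Type)
    }
}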
