
Commit 55c0bb9

Merge pull request #558 from PiotrProkop/nodelocking-integration-tests
Add integration tests for DiscardReservedNodes NRT cache type
2 parents: ab6c864 + 84a29f3

2 files changed: +169 / -7 lines

pkg/noderesourcetopology/pluginhelpers.go

Lines changed: 4 additions & 0 deletions
@@ -51,6 +51,10 @@ func initNodeTopologyInformer(tcfg *apiconfig.NodeResourceTopologyMatchArgs, han
 	topologyInformerFactory.Start(ctx.Done())
 	topologyInformerFactory.WaitForCacheSync(ctx.Done())

+	if tcfg.DiscardReservedNodes {
+		return nrtcache.NewDiscardReserved(nodeTopologyLister), nil
+	}
+
 	if tcfg.CacheResyncPeriodSeconds <= 0 {
 		return nrtcache.NewPassthrough(nodeTopologyLister), nil
 	}
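
For readers skimming the diff: after this change, the cache selection at the top of initNodeTopologyInformer reads roughly as follows. This is a sketch assembled only from the hunk above; the resync-capable branch that follows in the original file is elided, and the commit adds no code beyond what the hunk shows.

// Sketch of the resulting control flow (names taken from the hunk above).
// DiscardReservedNodes is checked first, so it takes precedence over the
// resync-based cache options.
if tcfg.DiscardReservedNodes {
	return nrtcache.NewDiscardReserved(nodeTopologyLister), nil
}

if tcfg.CacheResyncPeriodSeconds <= 0 {
	// No resync period configured: fall back to the passthrough cache
	// built directly on the NodeResourceTopology lister.
	return nrtcache.NewPassthrough(nodeTopologyLister), nil
}
// ... resync-capable cache construction continues in the original file.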

test/integration/noderesourcetopology_cache_test.go

Lines changed: 165 additions & 7 deletions
@@ -35,6 +35,8 @@ import (
 	"k8s.io/klog/v2"
 	"k8s.io/kubernetes/pkg/scheduler"
 	schedapi "k8s.io/kubernetes/pkg/scheduler/apis/config"
+	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultbinder"
+	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/queuesort"
 	fwkruntime "k8s.io/kubernetes/pkg/scheduler/framework/runtime"
 	st "k8s.io/kubernetes/pkg/scheduler/testing"

@@ -50,24 +52,26 @@ import (
 const (
 	defaultCacheResyncPeriodSeconds int64 = 5
 	anyNode                               = "*"
+	discardReservedSchedulerName          = "discardReserved"
 )

 var (
 	schedVerbose = "0"
 )

 type podDesc struct {
-	podName      string
-	isGuaranteed bool
-	isDelete     bool
-	resourcesMap map[string]string
-	expectedNode string
+	schedulerName string
+	podName       string
+	isGuaranteed  bool
+	isDelete      bool
+	resourcesMap  map[string]string
+	expectedNode  string
 	// autogenerated
 	pod *corev1.Pod
 }

 func (p *podDesc) SetupPod(ns string, initContainer bool) {
-	pt := st.MakePod().Namespace(ns).Name(p.podName)
+	pt := st.MakePod().Namespace(ns).Name(p.podName).SchedulerName(p.schedulerName)
 	if p.isGuaranteed {
 		p.pod = util.WithLimits(pt, p.resourcesMap, initContainer).Obj()
 	} else {

@@ -264,6 +268,109 @@ func TestTopologyCachePluginWithoutUpdates(t *testing.T) {
 						}).Obj(),
 			},
 		},
+		{
+			name: "GU pod: DiscardReservedNodes: allows scheduling on both Zones",
+			podDescs: []podDesc{
+				{
+					podName:      "nrt-pod-1000",
+					isGuaranteed: true,
+					resourcesMap: map[string]string{
+						string(corev1.ResourceCPU):    "16",
+						string(corev1.ResourceMemory): "24Gi",
+					},
+					expectedNode:  "fake-node-cache-1",
+					schedulerName: discardReservedSchedulerName,
+				},
+				{
+					podName:      "nrt-pod-2000",
+					isGuaranteed: true,
+					resourcesMap: map[string]string{
+						string(corev1.ResourceCPU):    "16",
+						string(corev1.ResourceMemory): "24Gi",
+					},
+					schedulerName: discardReservedSchedulerName,
+					expectedNode:  "fake-node-cache-1",
+				},
+			},
+			nodeResourceTopologies: []*topologyv1alpha2.NodeResourceTopology{
+				MakeNRT().Name("fake-node-cache-1").Policy(topologyv1alpha2.SingleNUMANodeContainerLevel).
+					Zone(
+						topologyv1alpha2.ResourceInfoList{
+							noderesourcetopology.MakeTopologyResInfo(cpu, "32", "30"),
+							noderesourcetopology.MakeTopologyResInfo(memory, "64Gi", "60Gi"),
+						}).
+					Zone(
+						topologyv1alpha2.ResourceInfoList{
+							noderesourcetopology.MakeTopologyResInfo(cpu, "32", "30"),
+							noderesourcetopology.MakeTopologyResInfo(memory, "64Gi", "62Gi"),
+						}).Obj(),
+				MakeNRT().Name("fake-node-cache-2").Policy(topologyv1alpha2.SingleNUMANodeContainerLevel).
+					Zone(
+						topologyv1alpha2.ResourceInfoList{
+							noderesourcetopology.MakeTopologyResInfo(cpu, "32", "10"),
+							noderesourcetopology.MakeTopologyResInfo(memory, "64Gi", "14Gi"),
+						}).
+					Zone(
+						topologyv1alpha2.ResourceInfoList{
+							noderesourcetopology.MakeTopologyResInfo(cpu, "32", "8"),
+							noderesourcetopology.MakeTopologyResInfo(memory, "64Gi", "10Gi"),
+						}).Obj(),
+			},
+		},
+		{
+			name: "GU pod: DiscardReservedNodes: new pod is successfully scheduled on the node, after deleting pod consuming most resources",
+			podDescs: []podDesc{
+				{
+					podName:      "nrt-pod-3000",
+					isGuaranteed: true,
+					resourcesMap: map[string]string{
+						string(corev1.ResourceCPU):    "30",
+						string(corev1.ResourceMemory): "60Gi",
+					},
+					schedulerName: discardReservedSchedulerName,
+					expectedNode:  "fake-node-cache-1",
+				},
+				{
+					podName:       "nrt-pod-3000",
+					isDelete:      true,
+					schedulerName: "discardReserved",
+				},
+				{
+					podName:      "nrt-pod-4000",
+					isGuaranteed: true,
+					resourcesMap: map[string]string{
+						string(corev1.ResourceCPU):    "16",
+						string(corev1.ResourceMemory): "24Gi",
+					},
+					schedulerName: discardReservedSchedulerName,
+					expectedNode:  "fake-node-cache-1",
+				},
+			},
+			nodeResourceTopologies: []*topologyv1alpha2.NodeResourceTopology{
+				MakeNRT().Name("fake-node-cache-1").Policy(topologyv1alpha2.SingleNUMANodeContainerLevel).
+					Zone(
+						topologyv1alpha2.ResourceInfoList{
+							noderesourcetopology.MakeTopologyResInfo(cpu, "32", "30"),
+							noderesourcetopology.MakeTopologyResInfo(memory, "64Gi", "60Gi"),
+						}).
+					Zone(
+						topologyv1alpha2.ResourceInfoList{
+							noderesourcetopology.MakeTopologyResInfo(cpu, "32", "30"),
+							noderesourcetopology.MakeTopologyResInfo(memory, "64Gi", "62Gi"),
+						}).Obj(),
+				MakeNRT().Name("fake-node-cache-2").Policy(topologyv1alpha2.SingleNUMANodeContainerLevel).
+					Zone(
+						topologyv1alpha2.ResourceInfoList{
+							noderesourcetopology.MakeTopologyResInfo(cpu, "32", "10"),
+							noderesourcetopology.MakeTopologyResInfo(memory, "64Gi", "14Gi"),
+						}).
+					Zone(
+						topologyv1alpha2.ResourceInfoList{
+							noderesourcetopology.MakeTopologyResInfo(cpu, "32", "8"),
+							noderesourcetopology.MakeTopologyResInfo(memory, "64Gi", "10Gi"),
+						}).Obj(),
+			},
+		},
 	} {
 		t.Run(tt.name, func(t *testing.T) {
 			// because caching, each testcase needs to run from a clean slate

@@ -297,6 +404,8 @@ func TestTopologyCachePluginWithoutUpdates(t *testing.T) {
 				},
 			})

+			cfg.Profiles = append(cfg.Profiles, discardReservedSchedulerProfile())
+
 			defer func() {
 				cleanupTest(t, testCtx)
 				klog.Infof("test environment cleaned up")

@@ -356,7 +465,7 @@ func TestTopologyCachePluginWithoutUpdates(t *testing.T) {
 						t.Fatalf("Failed to delete Pod %q: %v", p.podName, err)
 					}
 				} else {
-					klog.Infof("Creating Pod %q", p.pod.Name)
+					klog.Infof("Creating Pod %q: scheduler: %q", p.pod.Name, p.pod.Spec.SchedulerName)
 					_, err := cs.CoreV1().Pods(ns).Create(testCtx.Ctx, p.pod, metav1.CreateOptions{})
 					if err != nil {
 						t.Fatalf("Failed to create Pod %q: %v", p.pod.Name, err)

@@ -698,3 +807,52 @@ func mkPFP(nodeName string, pods ...*corev1.Pod) string {
 	klog.Infof("PFP for %q: %s", nodeName, st.Repr())
 	return pfp
 }
+
+func discardReservedSchedulerProfile() schedapi.KubeSchedulerProfile {
+	nodeLockingMatchArgs := schedconfig.NodeResourceTopologyMatchArgs{
+		ScoringStrategy:      schedconfig.ScoringStrategy{Type: schedconfig.LeastAllocated},
+		DiscardReservedNodes: true,
+	}
+
+	return schedapi.KubeSchedulerProfile{
+		SchedulerName: discardReservedSchedulerName,
+		Plugins: &schedapi.Plugins{
+			QueueSort: schedapi.PluginSet{
+				Enabled: []schedapi.Plugin{
+					{Name: queuesort.Name},
+				},
+			},
+			Filter: schedapi.PluginSet{
+				Enabled: []schedapi.Plugin{
+					{Name: noderesourcetopology.Name},
+				},
+			},
+			Score: schedapi.PluginSet{
+				Enabled: []schedapi.Plugin{
+					{Name: noderesourcetopology.Name},
+				},
+			},
+			Reserve: schedapi.PluginSet{
+				Enabled: []schedapi.Plugin{
+					{Name: noderesourcetopology.Name},
+				},
+			},
+			PostBind: schedapi.PluginSet{
+				Enabled: []schedapi.Plugin{
+					{Name: noderesourcetopology.Name},
+				},
+			},
+			Bind: schedapi.PluginSet{
+				Enabled: []schedapi.Plugin{
+					{Name: defaultbinder.Name},
+				},
+			},
+		},
+		PluginConfig: []schedapi.PluginConfig{
+			{
+				Name: noderesourcetopology.Name,
+				Args: &nodeLockingMatchArgs,
+			},
+		},
+	}
+}
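
How the pieces fit together: a test case opts into the new behaviour purely through podDesc.schedulerName, which SetupPod now copies into the pod spec, while discardReservedSchedulerProfile() registers a second scheduler profile whose NodeResourceTopologyMatchArgs set DiscardReservedNodes: true. A minimal sketch of that flow, using only identifiers that appear in this diff (ns stands in for the test namespace and is not a new helper):

// Sketch only; this is not additional test code from the commit.
p := podDesc{
	podName:       "nrt-pod-1000",
	isGuaranteed:  true,
	schedulerName: discardReservedSchedulerName, // "discardReserved"
	resourcesMap: map[string]string{
		string(corev1.ResourceCPU):    "16",
		string(corev1.ResourceMemory): "24Gi",
	},
}
p.SetupPod(ns, false) // pod.Spec.SchedulerName is now "discardReserved"

// During test setup the matching profile is appended to the scheduler config:
//   cfg.Profiles = append(cfg.Profiles, discardReservedSchedulerProfile())
// so pods carrying that scheduler name are handled by the profile whose
// noderesourcetopology plugin runs with DiscardReservedNodes: true.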
