Commit e3388b9

nrt: add integration test coverage for the ephemeral storage
The ephemeral storage resource is not a deciding factor for noderesourcetopology filtering, but it was incorrectly accounted, causing bad scheduling decisions. As a first step, we add integration test coverage to catch these issues.

Signed-off-by: Francesco Romani <[email protected]>
1 parent 7a8afdf commit e3388b9
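
The behavior these tests pin down, in miniature: ephemeral storage is accounted per node, never per NUMA zone, so a per-zone fit check must skip it instead of rejecting a zone for lacking capacity it cannot have. The sketch below illustrates that rule; zoneFits and its signature are assumptions for illustration, not the plugin's actual code.

package sketch

import v1 "k8s.io/api/core/v1"

// zoneFits is a hypothetical helper: does a container's request set fit in
// one NUMA zone's available resources?
func zoneFits(zoneAvail, requests v1.ResourceList) bool {
	for name, want := range requests {
		if name == v1.ResourceEphemeralStorage {
			// node-scoped resource: must never be a deciding factor
			// for per-zone fit
			continue
		}
		have, tracked := zoneAvail[name]
		if !tracked {
			// no per-zone accounting for this resource either
			continue
		}
		if want.Cmp(have) > 0 {
			return false
		}
	}
	return true
}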

2 files changed: +103 −15

test/integration/noderesourcetopology_test.go

Lines changed: 97 additions & 10 deletions
@@ -74,6 +74,8 @@ type nrtTestUserEntry struct {
 	cntReq []map[string]string
 	errMsg string
 	// this testing batch is going to be run against the same node and NRT objects, hence we're not specifying them.
+	isBurstable   bool
+	expectedNodes []string
 }
 
 type nrtTestEntry struct {
@@ -211,11 +213,12 @@ func TestTopologyMatchPlugin(t *testing.T) {
 
 	// Create a Node.
 	resList := map[v1.ResourceName]string{
-		v1.ResourceCPU:    "64",
-		v1.ResourceMemory: "128Gi",
-		v1.ResourcePods:   "32",
-		hugepages2Mi:      "896Mi",
-		nicResourceName:   "48",
+		v1.ResourceCPU:              "64",
+		v1.ResourceMemory:           "128Gi",
+		v1.ResourcePods:             "32",
+		hugepages2Mi:                "896Mi",
+		nicResourceName:             "48",
+		v1.ResourceEphemeralStorage: "32Gi",
 	}
 	for _, nodeName := range []string{"fake-node-1", "fake-node-2"} {
 		newNode := st.MakeNode().Name(nodeName).Label("node", nodeName).Capacity(resList).Obj()
@@ -1959,6 +1962,75 @@ func TestTopologyMatchPlugin(t *testing.T) {
 			},
 			errMsg: "cannot align init container", // initcnt-2
 		},
+		// ephemeral storage
+		{
+			description: "[tier1] single containers one requiring ephemeral storage with good allocation - fit",
+			cntReq: []map[string]string{
+				{cpu: "2", memory: "4Gi", ephemeralStorage: "1Gi"},
+			},
+		},
+		{
+			description: "[tier1] multi containers all requiring ephemeral storage with good allocation - fit",
+			cntReq: []map[string]string{
+				{cpu: "2", memory: "4Gi", ephemeralStorage: "1Gi"},
+				{cpu: "2", memory: "4Gi", ephemeralStorage: "512Mi"},
+				{cpu: "4", memory: "8Gi", ephemeralStorage: "2Gi"},
+			},
+		},
+		{
+			description: "[tier1] multi containers some requiring ephemeral storage with good allocation - fit",
+			cntReq: []map[string]string{
+				{cpu: "2", memory: "4Gi", ephemeralStorage: "1Gi"},
+				{cpu: "2", memory: "4Gi"},
+				{cpu: "4", memory: "8Gi", ephemeralStorage: "2Gi"},
+			},
+		},
+		{
+			description: "[tier1] multi containers one requiring ephemeral storage with good allocation - fit",
+			cntReq: []map[string]string{
+				{cpu: "2", memory: "4Gi", ephemeralStorage: "1Gi"},
+				{cpu: "2", memory: "4Gi"},
+				{cpu: "4", memory: "8Gi"},
+			},
+		},
+		{
+			description: "[tier1][burstable] single containers one requiring ephemeral storage with good allocation - fit",
+			cntReq: []map[string]string{
+				{cpu: "2", memory: "4Gi", ephemeralStorage: "1Gi"},
+			},
+			isBurstable:   true,
+			expectedNodes: []string{"fake-node-1", "fake-node-2"}, // any node
+		},
+		{
+			description: "[tier1][burstable] multi containers all requiring ephemeral storage with good allocation - fit",
+			cntReq: []map[string]string{
+				{cpu: "2", memory: "4Gi", ephemeralStorage: "1Gi"},
+				{cpu: "2", memory: "4Gi", ephemeralStorage: "512Mi"},
+				{cpu: "4", memory: "8Gi", ephemeralStorage: "2Gi"},
+			},
+			isBurstable:   true,
+			expectedNodes: []string{"fake-node-1", "fake-node-2"}, // any node
+		},
+		{
+			description: "[tier1][burstable] multi containers some requiring ephemeral storage with good allocation - fit",
+			cntReq: []map[string]string{
+				{cpu: "2", memory: "4Gi", ephemeralStorage: "1Gi"},
+				{cpu: "2", memory: "4Gi"},
+				{cpu: "4", memory: "8Gi", ephemeralStorage: "2Gi"},
+			},
+			isBurstable:   true,
+			expectedNodes: []string{"fake-node-1", "fake-node-2"}, // any node
+		},
+		{
+			description: "[tier1][burstable] multi containers one requiring ephemeral storage with good allocation - fit",
+			cntReq: []map[string]string{
+				{cpu: "2", memory: "4Gi", ephemeralStorage: "1Gi"},
+				{cpu: "2", memory: "4Gi"},
+				{cpu: "4", memory: "8Gi"},
+			},
+			isBurstable:   true,
+			expectedNodes: []string{"fake-node-1", "fake-node-2"}, // any node
+		},
 	}
 	tests = append(tests, parseTestUserEntry(scopeEqualsContainerTests, ns)...)
 
@@ -2084,13 +2156,26 @@ func makeProfileByPluginArgs(
 func parseTestUserEntry(entries []nrtTestUserEntry, ns string) []nrtTestEntry {
 	var teList []nrtTestEntry
 	for i, e := range entries {
+		desiredQoS := v1.PodQOSGuaranteed
+		if e.isBurstable {
+			desiredQoS = v1.PodQOSBurstable
+		}
+
 		p := st.MakePod().Name(fmt.Sprintf("%s-%d", testPodName, i+1)).Namespace(ns)
 		for _, req := range e.initCntReq {
-			p = util.WithLimits(p, req, true)
+			if desiredQoS == v1.PodQOSGuaranteed {
+				p = util.WithLimits(p, req, true)
+			} else {
+				p = util.WithRequests(p, req, true)
+			}
 		}
 
 		for _, req := range e.cntReq {
-			p = util.WithLimits(p, req, false)
+			if desiredQoS == v1.PodQOSGuaranteed {
+				p = util.WithLimits(p, req, false)
+			} else {
+				p = util.WithRequests(p, req, false)
+			}
 		}
 		nodeTopologies := []*topologyv1alpha2.NodeResourceTopology{
 			MakeNRT().Name("fake-node-1").
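
The WithLimits/WithRequests switch above is what flips the pod's QoS class: a pod is Guaranteed only when every container's limits equal its requests, and setting only limits suffices because the apiserver copies omitted requests from limits; requests without limits leave the pod Burstable. A minimal sketch of the two container shapes under those rules (names are my own; the test helpers' internals are assumptions):

package sketch

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// guaranteedContainer: limits only; the apiserver defaults requests to
// limits, so requests == limits and the pod can be Guaranteed.
func guaranteedContainer() v1.Container {
	return v1.Container{
		Name: "cnt-guaranteed",
		Resources: v1.ResourceRequirements{
			Limits: v1.ResourceList{
				v1.ResourceCPU:    resource.MustParse("2"),
				v1.ResourceMemory: resource.MustParse("4Gi"),
			},
		},
	}
}

// burstableContainer: requests without limits keeps the pod Burstable.
func burstableContainer() v1.Container {
	return v1.Container{
		Name: "cnt-burstable",
		Resources: v1.ResourceRequirements{
			Requests: v1.ResourceList{
				v1.ResourceCPU:    resource.MustParse("2"),
				v1.ResourceMemory: resource.MustParse("4Gi"),
			},
		},
	}
}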
@@ -2146,9 +2231,11 @@ func parseTestUserEntry(entries []nrtTestUserEntry, ns string) []nrtTestEntry {
 			}).Obj(),
 		}
 		expectedNodes := []string{"fake-node-1"}
-		// if there's an error we expect the pod
-		// to not be found on any node
-		if len(e.errMsg) > 0 {
+		if len(e.expectedNodes) > 0 {
+			expectedNodes = e.expectedNodes
+		} else if len(e.errMsg) > 0 {
+			// if there's an error we expect the pod
+			// to not be found on any node
 			expectedNodes = []string{}
 		}
 		te := nrtTestEntry{
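
The expectedNodes override exists mainly for the burstable entries, which accept either fake node: NUMA alignment only constrains Guaranteed pods, mirroring the kubelet Topology Manager, which skips alignment for the other QoS classes. A sketch of that gate, assuming the qos helper from k8s.io/kubernetes; the plugin's actual gating code may differ:

package sketch

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/apis/core/v1/helper/qos"
)

// needsNUMAAlignment: only Guaranteed QoS pods get the strict per-zone
// alignment check, so Burstable test pods may land on any node with
// enough free node-level capacity.
func needsNUMAAlignment(pod *v1.Pod) bool {
	return qos.GetPodQOS(pod) == v1.PodQOSGuaranteed
}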

test/integration/nrtutils.go

Lines changed: 6 additions & 5 deletions
@@ -42,11 +42,12 @@ import (
 )
 
 const (
-	cpu             = string(corev1.ResourceCPU)
-	memory          = string(corev1.ResourceMemory)
-	gpuResourceName = "vendor/gpu"
-	hugepages2Mi    = "hugepages-2Mi"
-	nicResourceName = "vendor/nic1"
+	cpu              = string(corev1.ResourceCPU)
+	memory           = string(corev1.ResourceMemory)
+	gpuResourceName  = "vendor/gpu"
+	hugepages2Mi     = "hugepages-2Mi"
+	nicResourceName  = "vendor/nic1"
+	ephemeralStorage = string(corev1.ResourceEphemeralStorage)
 )
 
 func waitForNRT(cs *clientset.Clientset) error {
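
As a quick sanity check on the test data: the heaviest guaranteed entry requests 1Gi + 512Mi + 2Gi of ephemeral storage across its containers, well within the 32Gi capacity each fake node now advertises. A self-contained example of that arithmetic with the resource API:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// sum the per-container ephemeral storage requests from the
	// "multi containers all requiring ephemeral storage" entry
	total := resource.MustParse("1Gi")
	total.Add(resource.MustParse("512Mi"))
	total.Add(resource.MustParse("2Gi"))

	capacity := resource.MustParse("32Gi") // node capacity set in the test

	// prints something like: requested=3584Mi capacity=32Gi fits=true
	fmt.Printf("requested=%s capacity=%s fits=%v\n",
		total.String(), capacity.String(), total.Cmp(capacity) <= 0)
}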
