@@ -74,6 +74,8 @@ type nrtTestUserEntry struct {
 	cntReq []map[string]string
 	errMsg string
 	// this testing batch is going to be run against the same node and NRT objects, hence we're not specifying them.
+	isBurstable   bool
+	expectedNodes []string
 }

 type nrtTestEntry struct {
@@ -211,11 +213,12 @@ func TestTopologyMatchPlugin(t *testing.T) {

 	// Create a Node.
 	resList := map[v1.ResourceName]string{
-		v1.ResourceCPU:    "64",
-		v1.ResourceMemory: "128Gi",
-		v1.ResourcePods:   "32",
-		hugepages2Mi:      "896Mi",
-		nicResourceName:   "48",
+		v1.ResourceCPU:              "64",
+		v1.ResourceMemory:           "128Gi",
+		v1.ResourcePods:             "32",
+		hugepages2Mi:                "896Mi",
+		nicResourceName:             "48",
+		v1.ResourceEphemeralStorage: "32Gi",
 	}
 	for _, nodeName := range []string{"fake-node-1", "fake-node-2"} {
 		newNode := st.MakeNode().Name(nodeName).Label("node", nodeName).Capacity(resList).Obj()
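With ephemeral storage added to the capacity map, each fake node now advertises all six resources. For reference, a minimal sketch of what a string-valued map like `resList` expands to, assuming the `MakeNode().Capacity()` helper parses each quantity (e.g. via `resource.MustParse`); `toResourceList` is a hypothetical name used only for illustration:

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// toResourceList converts a string-valued capacity map into the typed
// v1.ResourceList stored on Node.Status.Capacity. Hypothetical helper,
// shown only to illustrate what the test fixture builds.
func toResourceList(in map[v1.ResourceName]string) v1.ResourceList {
	out := v1.ResourceList{}
	for name, qty := range in {
		out[name] = resource.MustParse(qty) // panics on malformed quantities
	}
	return out
}

func main() {
	caps := toResourceList(map[v1.ResourceName]string{
		v1.ResourceCPU:              "64",
		v1.ResourceMemory:           "128Gi",
		v1.ResourceEphemeralStorage: "32Gi",
	})
	eph := caps[v1.ResourceEphemeralStorage]
	fmt.Println(eph.String()) // 32Gi
}
```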
@@ -1959,6 +1962,75 @@ func TestTopologyMatchPlugin(t *testing.T) {
 			},
 			errMsg: "cannot align init container", // initcnt-2
 		},
+		// ephemeral storage
+		{
+			description: "[tier1] single container requiring ephemeral storage with good allocation - fit",
+			cntReq: []map[string]string{
+				{cpu: "2", memory: "4Gi", ephemeralStorage: "1Gi"},
+			},
+		},
+		{
+			description: "[tier1] multi containers all requiring ephemeral storage with good allocation - fit",
+			cntReq: []map[string]string{
+				{cpu: "2", memory: "4Gi", ephemeralStorage: "1Gi"},
+				{cpu: "2", memory: "4Gi", ephemeralStorage: "512Mi"},
+				{cpu: "4", memory: "8Gi", ephemeralStorage: "2Gi"},
+			},
+		},
+		{
+			description: "[tier1] multi containers some requiring ephemeral storage with good allocation - fit",
+			cntReq: []map[string]string{
+				{cpu: "2", memory: "4Gi", ephemeralStorage: "1Gi"},
+				{cpu: "2", memory: "4Gi"},
+				{cpu: "4", memory: "8Gi", ephemeralStorage: "2Gi"},
+			},
+		},
+		{
+			description: "[tier1] multi containers one requiring ephemeral storage with good allocation - fit",
+			cntReq: []map[string]string{
+				{cpu: "2", memory: "4Gi", ephemeralStorage: "1Gi"},
+				{cpu: "2", memory: "4Gi"},
+				{cpu: "4", memory: "8Gi"},
+			},
+		},
+		{
+			description: "[tier1][burstable] single container requiring ephemeral storage with good allocation - fit",
+			cntReq: []map[string]string{
+				{cpu: "2", memory: "4Gi", ephemeralStorage: "1Gi"},
+			},
+			isBurstable:   true,
+			expectedNodes: []string{"fake-node-1", "fake-node-2"}, // any node
+		},
+		{
+			description: "[tier1][burstable] multi containers all requiring ephemeral storage with good allocation - fit",
+			cntReq: []map[string]string{
+				{cpu: "2", memory: "4Gi", ephemeralStorage: "1Gi"},
+				{cpu: "2", memory: "4Gi", ephemeralStorage: "512Mi"},
+				{cpu: "4", memory: "8Gi", ephemeralStorage: "2Gi"},
+			},
+			isBurstable:   true,
+			expectedNodes: []string{"fake-node-1", "fake-node-2"}, // any node
+		},
+		{
+			description: "[tier1][burstable] multi containers some requiring ephemeral storage with good allocation - fit",
+			cntReq: []map[string]string{
+				{cpu: "2", memory: "4Gi", ephemeralStorage: "1Gi"},
+				{cpu: "2", memory: "4Gi"},
+				{cpu: "4", memory: "8Gi", ephemeralStorage: "2Gi"},
+			},
+			isBurstable:   true,
+			expectedNodes: []string{"fake-node-1", "fake-node-2"}, // any node
+		},
+		{
+			description: "[tier1][burstable] multi containers one requiring ephemeral storage with good allocation - fit",
+			cntReq: []map[string]string{
+				{cpu: "2", memory: "4Gi", ephemeralStorage: "1Gi"},
+				{cpu: "2", memory: "4Gi"},
+				{cpu: "4", memory: "8Gi"},
+			},
+			isBurstable:   true,
+			expectedNodes: []string{"fake-node-1", "fake-node-2"}, // any node
+		},
 	}
 	tests = append(tests, parseTestUserEntry(scopeEqualsContainerTests, ns)...)

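The `[burstable]` entries rely on the QoS distinction that drives the parser change below: a container whose limits equal its requests keeps the pod Guaranteed, while requests without limits make the pod Burstable, and the `// any node` expectation reflects that topology alignment is presumably only enforced for Guaranteed pods. A minimal sketch of the two container shapes, using only core API types (the helper name and container name are illustrative):

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// makeContainer returns a container with the given requests; limits are
// attached only when lim is non-nil.
func makeContainer(req, lim v1.ResourceList) v1.Container {
	return v1.Container{
		Name:      "cnt-1",
		Resources: v1.ResourceRequirements{Requests: req, Limits: lim},
	}
}

func main() {
	res := v1.ResourceList{
		v1.ResourceCPU:              resource.MustParse("2"),
		v1.ResourceMemory:           resource.MustParse("4Gi"),
		v1.ResourceEphemeralStorage: resource.MustParse("1Gi"),
	}

	// Guaranteed QoS: every container sets limits equal to its requests.
	guaranteed := makeContainer(res, res)
	// Burstable QoS: requests are set, but no matching limits.
	burstable := makeContainer(res, nil)

	fmt.Println(len(guaranteed.Resources.Limits)) // 3
	fmt.Println(len(burstable.Resources.Limits))  // 0
}
```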
@@ -2084,13 +2156,26 @@ func makeProfileByPluginArgs(
 func parseTestUserEntry(entries []nrtTestUserEntry, ns string) []nrtTestEntry {
 	var teList []nrtTestEntry
 	for i, e := range entries {
+		desiredQoS := v1.PodQOSGuaranteed
+		if e.isBurstable {
+			desiredQoS = v1.PodQOSBurstable
+		}
+
 		p := st.MakePod().Name(fmt.Sprintf("%s-%d", testPodName, i+1)).Namespace(ns)
 		for _, req := range e.initCntReq {
-			p = util.WithLimits(p, req, true)
+			if desiredQoS == v1.PodQOSGuaranteed {
+				p = util.WithLimits(p, req, true)
+			} else {
+				p = util.WithRequests(p, req, true)
+			}
 		}

 		for _, req := range e.cntReq {
-			p = util.WithLimits(p, req, false)
+			if desiredQoS == v1.PodQOSGuaranteed {
+				p = util.WithLimits(p, req, false)
+			} else {
+				p = util.WithRequests(p, req, false)
+			}
 		}
 		nodeTopologies := []*topologyv1alpha2.NodeResourceTopology{
 			MakeNRT().Name("fake-node-1").
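`util.WithRequests` is the new counterpart to `util.WithLimits` used above; its implementation is not shown in this hunk. A plausible sketch inferred only from the call sites, under the assumption that, like the limits helper, it appends a container carrying the parsed resources (field layout and container naming are assumptions, not the PR's code):

```go
import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	st "k8s.io/kubernetes/pkg/scheduler/testing"
)

// withRequestsSketch mirrors how util.WithRequests is called above: it
// appends a container (an init container when initCnt is true) that sets
// only resource requests, leaving limits empty so the pod stays Burstable.
func withRequestsSketch(p *st.PodWrapper, req map[string]string, initCnt bool) *st.PodWrapper {
	res := v1.ResourceList{}
	for name, qty := range req {
		res[v1.ResourceName(name)] = resource.MustParse(qty)
	}
	cnt := v1.Container{
		Name:      fmt.Sprintf("cnt-%d", len(p.Spec.Containers)+len(p.Spec.InitContainers)+1),
		Resources: v1.ResourceRequirements{Requests: res},
	}
	if initCnt {
		p.Spec.InitContainers = append(p.Spec.InitContainers, cnt)
	} else {
		p.Spec.Containers = append(p.Spec.Containers, cnt)
	}
	return p
}
```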
@@ -2146,9 +2231,11 @@ func parseTestUserEntry(entries []nrtTestUserEntry, ns string) []nrtTestEntry {
 			}).Obj(),
 		}
 		expectedNodes := []string{"fake-node-1"}
-		// if there's an error we expect the pod
-		// to not be found on any node
-		if len(e.errMsg) > 0 {
+		if len(e.expectedNodes) > 0 {
+			expectedNodes = e.expectedNodes
+		} else if len(e.errMsg) > 0 {
+			// if there's an error we expect the pod
+			// to not be found on any node
 			expectedNodes = []string{}
 		}
 		te := nrtTestEntry{