Skip to content

Commit ce702ec

Browse files
authored
CapacityScheduling: Fix default values for Max/Min in ElasticQuota (#520)
* CapacityScheduling: set default value of Max/Min
* CapacityScheduling: add Ephemeral-storage
* CapacityScheduling: add default for ElasticQuota's Spec
* CapacityScheduling: handle max/min by elasticquotainfos
1 parent 56e2398 commit ce702ec

File tree

4 files changed

+513
-11
lines changed

4 files changed

+513
-11
lines changed

pkg/capacityscheduling/capacity_scheduling_test.go

Lines changed: 175 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -18,11 +18,14 @@ package capacityscheduling
1818

1919
import (
2020
"context"
21+
"reflect"
2122
"sort"
2223
"testing"
2324

25+
"k8s.io/apimachinery/pkg/util/sets"
26+
2427
gocmp "github.com/google/go-cmp/cmp"
25-
"k8s.io/api/core/v1"
28+
v1 "k8s.io/api/core/v1"
2629
"k8s.io/apimachinery/pkg/api/resource"
2730
apiruntime "k8s.io/apimachinery/pkg/runtime"
2831
"k8s.io/apimachinery/pkg/util/wait"
@@ -40,6 +43,8 @@ import (
4043
st "k8s.io/kubernetes/pkg/scheduler/testing"
4144
imageutils "k8s.io/kubernetes/test/utils/image"
4245

46+
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
47+
"sigs.k8s.io/scheduler-plugins/apis/scheduling/v1alpha1"
4348
testutil "sigs.k8s.io/scheduler-plugins/test/util"
4449
)
4550

@@ -370,6 +375,155 @@ func TestDryRunPreemption(t *testing.T) {
370375
}
371376
}
372377

378+
func TestAddElasticQuota(t *testing.T) {
379+
tests := []struct {
380+
name string
381+
ns []string
382+
elasticQuotas []interface{}
383+
expected map[string]*ElasticQuotaInfo
384+
}{
385+
{
386+
name: "Add ElasticQuota",
387+
elasticQuotas: []interface{}{
388+
makeEQ("ns1", "t1-eq1", makeResourceList(100, 1000), makeResourceList(10, 100)),
389+
},
390+
ns: []string{"ns1"},
391+
expected: map[string]*ElasticQuotaInfo{
392+
"ns1": {
393+
Namespace: "ns1",
394+
pods: sets.String{},
395+
Max: &framework.Resource{
396+
MilliCPU: 100,
397+
Memory: 1000,
398+
},
399+
Min: &framework.Resource{
400+
MilliCPU: 10,
401+
Memory: 100,
402+
},
403+
Used: &framework.Resource{
404+
MilliCPU: 0,
405+
Memory: 0,
406+
},
407+
},
408+
},
409+
},
410+
{
411+
name: "Add ElasticQuota without Max",
412+
elasticQuotas: []interface{}{
413+
makeEQ("ns1", "t1-eq1", nil, makeResourceList(10, 100)),
414+
},
415+
ns: []string{"ns1"},
416+
expected: map[string]*ElasticQuotaInfo{
417+
"ns1": {
418+
Namespace: "ns1",
419+
pods: sets.String{},
420+
Max: &framework.Resource{
421+
MilliCPU: UpperBoundOfMax,
422+
Memory: UpperBoundOfMax,
423+
EphemeralStorage: UpperBoundOfMax,
424+
},
425+
Min: &framework.Resource{
426+
MilliCPU: 10,
427+
Memory: 100,
428+
},
429+
Used: &framework.Resource{
430+
MilliCPU: 0,
431+
Memory: 0,
432+
},
433+
},
434+
},
435+
},
436+
{
437+
name: "Add ElasticQuota without Min",
438+
elasticQuotas: []interface{}{
439+
makeEQ("ns1", "t1-eq1", makeResourceList(100, 1000), nil),
440+
},
441+
ns: []string{"ns1"},
442+
expected: map[string]*ElasticQuotaInfo{
443+
"ns1": {
444+
Namespace: "ns1",
445+
pods: sets.String{},
446+
Max: &framework.Resource{
447+
MilliCPU: 100,
448+
Memory: 1000,
449+
},
450+
Min: &framework.Resource{
451+
MilliCPU: LowerBoundOfMin,
452+
Memory: LowerBoundOfMin,
453+
EphemeralStorage: LowerBoundOfMin,
454+
},
455+
Used: &framework.Resource{
456+
MilliCPU: 0,
457+
Memory: 0,
458+
},
459+
},
460+
},
461+
},
462+
{
463+
name: "Add ElasticQuota without Max and Min",
464+
elasticQuotas: []interface{}{
465+
makeEQ("ns1", "t1-eq1", nil, nil),
466+
},
467+
ns: []string{"ns1"},
468+
expected: map[string]*ElasticQuotaInfo{
469+
"ns1": {
470+
Namespace: "ns1",
471+
pods: sets.String{},
472+
Max: &framework.Resource{
473+
MilliCPU: UpperBoundOfMax,
474+
Memory: UpperBoundOfMax,
475+
EphemeralStorage: UpperBoundOfMax,
476+
},
477+
Min: &framework.Resource{
478+
MilliCPU: LowerBoundOfMin,
479+
Memory: LowerBoundOfMin,
480+
EphemeralStorage: LowerBoundOfMin,
481+
},
482+
Used: &framework.Resource{
483+
MilliCPU: 0,
484+
Memory: 0,
485+
},
486+
},
487+
},
488+
},
489+
}
490+
for _, tt := range tests {
491+
t.Run(tt.name, func(t *testing.T) {
492+
var registerPlugins []st.RegisterPluginFunc
493+
registeredPlugins := append(
494+
registerPlugins,
495+
st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
496+
st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
497+
)
498+
499+
fwk, err := st.NewFramework(
500+
registeredPlugins, "", wait.NeverStop,
501+
frameworkruntime.WithPodNominator(testutil.NewPodNominator(nil)),
502+
frameworkruntime.WithSnapshotSharedLister(testutil.NewFakeSharedLister(make([]*v1.Pod, 0), make([]*v1.Node, 0))),
503+
)
504+
505+
if err != nil {
506+
t.Fatal(err)
507+
}
508+
509+
cs := &CapacityScheduling{
510+
elasticQuotaInfos: map[string]*ElasticQuotaInfo{},
511+
fh: fwk,
512+
}
513+
514+
for _, elasticQuota := range tt.elasticQuotas {
515+
cs.addElasticQuota(elasticQuota)
516+
}
517+
518+
for _, ns := range tt.ns {
519+
if got := cs.elasticQuotaInfos[ns]; !reflect.DeepEqual(got, tt.expected[ns]) {
520+
t.Errorf("expected %v, got %v", tt.expected[ns], got)
521+
}
522+
}
523+
})
524+
}
525+
}
526+
373527
func makePod(podName string, namespace string, memReq int64, cpuReq int64, gpuReq int64, priority int32, uid string, nodeName string) *v1.Pod {
374528
pause := imageutils.GetPauseImageName()
375529
pod := st.MakePod().Namespace(namespace).Name(podName).Container(pause).
@@ -383,3 +537,23 @@ func makePod(podName string, namespace string, memReq int64, cpuReq int64, gpuRe
383537
}
384538
return pod
385539
}
540+
541+
func makeEQ(namespace, name string, max, min v1.ResourceList) *v1alpha1.ElasticQuota {
542+
eq := &v1alpha1.ElasticQuota{
543+
TypeMeta: metav1.TypeMeta{Kind: "ElasticQuota", APIVersion: "scheduling.sigs.k8s.io/v1alpha1"},
544+
ObjectMeta: metav1.ObjectMeta{
545+
Name: name,
546+
Namespace: namespace,
547+
},
548+
}
549+
eq.Spec.Max = max
550+
eq.Spec.Min = min
551+
return eq
552+
}
553+
554+
func makeResourceList(cpu, mem int64) v1.ResourceList {
555+
return v1.ResourceList{
556+
v1.ResourceCPU: *resource.NewMilliQuantity(cpu, resource.DecimalSI),
557+
v1.ResourceMemory: *resource.NewQuantity(mem, resource.BinarySI),
558+
}
559+
}

pkg/capacityscheduling/elasticquota.go

Lines changed: 48 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -17,12 +17,20 @@ limitations under the License.
1717
package capacityscheduling
1818

1919
import (
20-
"k8s.io/api/core/v1"
20+
"math"
21+
22+
v1 "k8s.io/api/core/v1"
23+
"k8s.io/apimachinery/pkg/api/resource"
2124
"k8s.io/apimachinery/pkg/util/sets"
2225
"k8s.io/kubernetes/pkg/scheduler/framework"
2326
"sigs.k8s.io/scheduler-plugins/pkg/util"
2427
)
2528

29+
const (
30+
UpperBoundOfMax = math.MaxInt64
31+
LowerBoundOfMin = 0
32+
)
33+
2634
type ElasticQuotaInfos map[string]*ElasticQuotaInfo
2735

2836
func NewElasticQuotaInfos() ElasticQuotaInfos {
@@ -47,7 +55,7 @@ func (e ElasticQuotaInfos) aggregatedUsedOverMinWith(podRequest framework.Resour
4755
}
4856

4957
used.Add(util.ResourceList(&podRequest))
50-
return cmp(used, min)
58+
return cmp(used, min, LowerBoundOfMin)
5159
}
5260

5361
// ElasticQuotaInfo is a wrapper to a ElasticQuota with information.
@@ -61,6 +69,13 @@ type ElasticQuotaInfo struct {
6169
}
6270

6371
func newElasticQuotaInfo(namespace string, min, max, used v1.ResourceList) *ElasticQuotaInfo {
72+
if min == nil {
73+
min = makeResourceListForBound(LowerBoundOfMin)
74+
}
75+
if max == nil {
76+
max = makeResourceListForBound(UpperBoundOfMax)
77+
}
78+
6479
elasticQuotaInfo := &ElasticQuotaInfo{
6580
Namespace: namespace,
6681
pods: sets.NewString(),
@@ -74,6 +89,8 @@ func newElasticQuotaInfo(namespace string, min, max, used v1.ResourceList) *Elas
7489
func (e *ElasticQuotaInfo) reserveResource(request framework.Resource) {
7590
e.Used.Memory += request.Memory
7691
e.Used.MilliCPU += request.MilliCPU
92+
e.Used.EphemeralStorage += request.EphemeralStorage
93+
e.Used.AllowedPodNumber += request.AllowedPodNumber
7794
for name, value := range request.ScalarResources {
7895
e.Used.SetScalar(name, e.Used.ScalarResources[name]+value)
7996
}
@@ -82,6 +99,8 @@ func (e *ElasticQuotaInfo) reserveResource(request framework.Resource) {
8299
func (e *ElasticQuotaInfo) unreserveResource(request framework.Resource) {
83100
e.Used.Memory -= request.Memory
84101
e.Used.MilliCPU -= request.MilliCPU
102+
e.Used.EphemeralStorage -= request.EphemeralStorage
103+
e.Used.AllowedPodNumber -= request.AllowedPodNumber
85104
for name, value := range request.ScalarResources {
86105
e.Used.SetScalar(name, e.Used.ScalarResources[name]-value)
87106
}
@@ -92,23 +111,23 @@ func (e *ElasticQuotaInfo) usedOverMinWith(podRequest *framework.Resource) bool
92111
if e.Min == nil {
93112
return true
94113
}
95-
return cmp2(podRequest, e.Used, e.Min)
114+
return cmp2(podRequest, e.Used, e.Min, LowerBoundOfMin)
96115
}
97116

98117
func (e *ElasticQuotaInfo) usedOverMaxWith(podRequest *framework.Resource) bool {
99118
// "ElasticQuotaInfo doesn't have Max" means there are no limitations(infinite)
100119
if e.Max == nil {
101120
return false
102121
}
103-
return cmp2(podRequest, e.Used, e.Max)
122+
return cmp2(podRequest, e.Used, e.Max, UpperBoundOfMax)
104123
}
105124

106125
func (e *ElasticQuotaInfo) usedOverMin() bool {
107126
// "ElasticQuotaInfo doesn't have Min" means used values exceeded min(0)
108127
if e.Min == nil {
109128
return true
110129
}
111-
return cmp(e.Used, e.Min)
130+
return cmp(e.Used, e.Min, LowerBoundOfMin)
112131
}
113132

114133
func (e *ElasticQuotaInfo) clone() *ElasticQuotaInfo {
@@ -170,11 +189,11 @@ func (e *ElasticQuotaInfo) deletePodIfPresent(pod *v1.Pod) error {
170189
return nil
171190
}
172191

173-
func cmp(x, y *framework.Resource) bool {
174-
return cmp2(x, &framework.Resource{}, y)
192+
func cmp(x, y *framework.Resource, bound int64) bool {
193+
return cmp2(x, &framework.Resource{}, y, bound)
175194
}
176195

177-
func cmp2(x1, x2, y *framework.Resource) bool {
196+
func cmp2(x1, x2, y *framework.Resource, bound int64) bool {
178197
if x1.MilliCPU+x2.MilliCPU > y.MilliCPU {
179198
return true
180199
}
@@ -183,11 +202,31 @@ func cmp2(x1, x2, y *framework.Resource) bool {
183202
return true
184203
}
185204

205+
if x1.EphemeralStorage+x2.EphemeralStorage > y.EphemeralStorage {
206+
return true
207+
}
208+
209+
if x1.AllowedPodNumber+x2.AllowedPodNumber > y.AllowedPodNumber {
210+
return true
211+
}
212+
186213
for rName, rQuant := range x1.ScalarResources {
187-
if rQuant+x2.ScalarResources[rName] > y.ScalarResources[rName] {
214+
yQuant := bound
215+
if yq, ok := y.ScalarResources[rName]; ok {
216+
yQuant = yq
217+
}
218+
if rQuant+x2.ScalarResources[rName] > yQuant {
188219
return true
189220
}
190221
}
191222

192223
return false
193224
}
225+
226+
func makeResourceListForBound(bound int64) v1.ResourceList {
227+
return v1.ResourceList{
228+
v1.ResourceCPU: *resource.NewMilliQuantity(bound, resource.DecimalSI),
229+
v1.ResourceMemory: *resource.NewQuantity(bound, resource.BinarySI),
230+
v1.ResourceEphemeralStorage: *resource.NewQuantity(bound, resource.BinarySI),
231+
}
232+
}

0 commit comments

Comments (0)