Commit cd3aac3

Author: draveness (committed)
Message: feat: cache pod limits as part of metadata in priority functions
Parent: c85c0e4

File tree

4 files changed: +40 −9 lines changed

pkg/scheduler/algorithm/priorities/metadata.go
pkg/scheduler/algorithm/priorities/metadata_test.go
pkg/scheduler/algorithm/priorities/resource_limits.go
pkg/scheduler/algorithm/priorities/resource_limits_test.go


pkg/scheduler/algorithm/priorities/metadata.go

Lines changed: 2 additions & 0 deletions
@@ -46,6 +46,7 @@ func NewPriorityMetadataFactory(serviceLister algorithm.ServiceLister, controlle
 // priorityMetadata is a type that is passed as metadata for priority functions
 type priorityMetadata struct {
     nonZeroRequest *schedulernodeinfo.Resource
+    podLimits      *schedulernodeinfo.Resource
     podTolerations []v1.Toleration
     affinity       *v1.Affinity
     podSelectors   []labels.Selector
@@ -62,6 +63,7 @@ func (pmf *PriorityMetadataFactory) PriorityMetadata(pod *v1.Pod, nodeNameToInfo
     }
     return &priorityMetadata{
         nonZeroRequest: getNonZeroRequests(pod),
+        podLimits:      getResourceLimits(pod),
         podTolerations: getAllTolerationPreferNoSchedule(pod.Spec.Tolerations),
         affinity:       pod.Spec.Affinity,
         podSelectors:   getSelectors(pod, pmf.serviceLister, pmf.controllerLister, pmf.replicaSetLister, pmf.statefulSetLister),
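
The pattern this commit relies on: the scheduler builds priorityMetadata once per pod before the nodes are scored, and each priority map function receives it as an opaque meta argument that it can type-assert back to *priorityMetadata, recomputing the value only when no metadata is available. A minimal sketch of such a consumer, assuming it sits in the same priorities package as the code above (limitsForPod is a hypothetical name, not part of this commit):

// Hypothetical helper, sketched for illustration only; assumed to live in the
// priorities package so it can reach the unexported identifiers above.
func limitsForPod(pod *v1.Pod, meta interface{}) *schedulernodeinfo.Resource {
    if priorityMeta, ok := meta.(*priorityMetadata); ok && priorityMeta != nil {
        // Cached once in PriorityMetadata, reused for every node being scored.
        return priorityMeta.podLimits
    }
    // No metadata (for example, a direct call in tests) - recompute from the pod spec.
    return getResourceLimits(pod)
}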

pkg/scheduler/algorithm/priorities/metadata_test.go

Lines changed: 13 additions & 0 deletions
@@ -38,6 +38,12 @@ func TestPriorityMetadata(t *testing.T) {
     specifiedReqs.MilliCPU = 200
     specifiedReqs.Memory = 2000
 
+    nonPodLimits := &schedulernodeinfo.Resource{}
+
+    specifiedPodLimits := &schedulernodeinfo.Resource{}
+    specifiedPodLimits.MilliCPU = 200
+    specifiedPodLimits.Memory = 2000
+
     tolerations := []v1.Toleration{{
         Key:      "foo",
         Operator: v1.TolerationOpEqual,
@@ -104,6 +110,10 @@ func TestPriorityMetadata(t *testing.T) {
                     Image:           "image",
                     ImagePullPolicy: "Always",
                     Resources: v1.ResourceRequirements{
+                        Limits: v1.ResourceList{
+                            v1.ResourceCPU:    resource.MustParse("200m"),
+                            v1.ResourceMemory: resource.MustParse("2000"),
+                        },
                         Requests: v1.ResourceList{
                             v1.ResourceCPU:    resource.MustParse("200m"),
                             v1.ResourceMemory: resource.MustParse("2000"),
@@ -128,6 +138,7 @@ func TestPriorityMetadata(t *testing.T) {
             pod: podWithTolerationsAndAffinity,
             expected: &priorityMetadata{
                 nonZeroRequest: nonZeroReqs,
+                podLimits:      nonPodLimits,
                 podTolerations: tolerations,
                 affinity:       podAffinity,
             },
@@ -137,6 +148,7 @@ func TestPriorityMetadata(t *testing.T) {
             pod: podWithTolerationsAndRequests,
             expected: &priorityMetadata{
                 nonZeroRequest: specifiedReqs,
+                podLimits:      nonPodLimits,
                 podTolerations: tolerations,
                 affinity:       nil,
             },
@@ -146,6 +158,7 @@ func TestPriorityMetadata(t *testing.T) {
             pod: podWithAffinityAndRequests,
             expected: &priorityMetadata{
                 nonZeroRequest: specifiedReqs,
+                podLimits:      specifiedPodLimits,
                 podTolerations: nil,
                 affinity:       podAffinity,
             },

pkg/scheduler/algorithm/priorities/resource_limits.go

Lines changed: 8 additions & 2 deletions
@@ -42,7 +42,14 @@ func ResourceLimitsPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedule
     allocatableResources := nodeInfo.AllocatableResource()
 
     // compute pod limits
-    podLimits := getResourceLimits(pod)
+    var podLimits *schedulernodeinfo.Resource
+    if priorityMeta, ok := meta.(*priorityMetadata); ok && priorityMeta != nil {
+        // We were able to parse metadata, use podLimits from there.
+        podLimits = priorityMeta.podLimits
+    } else {
+        // We couldn't parse metadata - fallback to computing it.
+        podLimits = getResourceLimits(pod)
+    }
 
     cpuScore := computeScore(podLimits.MilliCPU, allocatableResources.MilliCPU)
     memScore := computeScore(podLimits.Memory, allocatableResources.Memory)
@@ -83,7 +90,6 @@ func computeScore(limit, allocatable int64) int64 {
 // The reason to create this new function is to be consistent with other
 // priority functions because most or perhaps all priority functions work
 // with schedulernodeinfo.Resource.
-// TODO: cache it as part of metadata passed to priority functions.
 func getResourceLimits(pod *v1.Pod) *schedulernodeinfo.Resource {
     result := &schedulernodeinfo.Resource{}
     for _, container := range pod.Spec.Containers {
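
The second hunk above cuts off inside getResourceLimits. For reference, a body consistent with the shown signature would sum the limits of the pod's regular containers and then take the per-resource maximum against its init containers, roughly as sketched below (this is a sketch, not the file's actual remainder; the Add and SetMaxResource helpers on schedulernodeinfo.Resource are assumed):

func getResourceLimits(pod *v1.Pod) *schedulernodeinfo.Resource {
    result := &schedulernodeinfo.Resource{}
    for _, container := range pod.Spec.Containers {
        // Regular containers run together, so their limits add up.
        result.Add(container.Resources.Limits)
    }
    for _, container := range pod.Spec.InitContainers {
        // Init containers run one at a time, so take the max instead of the sum.
        result.SetMaxResource(container.Resources.Limits)
    }
    return result
}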

pkg/scheduler/algorithm/priorities/resource_limits_test.go

Lines changed: 17 additions & 7 deletions
@@ -27,7 +27,7 @@ import (
     schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 )
 
-func TestResourceLimistPriority(t *testing.T) {
+func TestResourceLimitsPriority(t *testing.T) {
     noResources := v1.PodSpec{
         Containers: []v1.Container{},
     }
@@ -140,12 +140,22 @@ func TestResourceLimistPriority(t *testing.T) {
     for _, test := range tests {
         t.Run(test.name, func(t *testing.T) {
             nodeNameToInfo := schedulernodeinfo.CreateNodeNameToInfoMap(nil, test.nodes)
-            list, err := priorityFunction(ResourceLimitsPriorityMap, nil, nil)(test.pod, nodeNameToInfo, test.nodes)
-            if err != nil {
-                t.Errorf("unexpected error: %v", err)
-            }
-            if !reflect.DeepEqual(test.expectedList, list) {
-                t.Errorf("expected %#v, got %#v", test.expectedList, list)
+
+            for _, hasMeta := range []bool{true, false} {
+                var metadata *priorityMetadata
+                if hasMeta {
+                    metadata = &priorityMetadata{
+                        podLimits: getResourceLimits(test.pod),
+                    }
+                }
+
+                list, err := priorityFunction(ResourceLimitsPriorityMap, nil, metadata)(test.pod, nodeNameToInfo, test.nodes)
+                if err != nil {
+                    t.Errorf("unexpected error: %v", err)
+                }
+                if !reflect.DeepEqual(test.expectedList, list) {
+                    t.Errorf("expected %#v, got %#v", test.expectedList, list)
+                }
             }
         })
     }
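
The test drives ResourceLimitsPriorityMap through a priorityFunction helper that is not part of this diff. Inferred from its call site above, such a helper plausibly wraps a per-node map function (plus an optional reduce step) into a function that scores a whole node list, along these lines (the schedulerapi.HostPriority and HostPriorityList types and the exact signatures are assumptions of this sketch):

func priorityFunction(
    mapFn func(*v1.Pod, interface{}, *schedulernodeinfo.NodeInfo) (schedulerapi.HostPriority, error),
    reduceFn func(*v1.Pod, interface{}, map[string]*schedulernodeinfo.NodeInfo, schedulerapi.HostPriorityList) error,
    metadata interface{},
) func(*v1.Pod, map[string]*schedulernodeinfo.NodeInfo, []*v1.Node) (schedulerapi.HostPriorityList, error) {
    return func(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) {
        result := make(schedulerapi.HostPriorityList, 0, len(nodes))
        for i := range nodes {
            // Score each node with the map function, handing the (possibly nil) metadata through.
            hostPriority, err := mapFn(pod, metadata, nodeNameToInfo[nodes[i].Name])
            if err != nil {
                return nil, err
            }
            result = append(result, hostPriority)
        }
        if reduceFn != nil {
            // Optionally normalize the raw per-node scores.
            if err := reduceFn(pod, metadata, nodeNameToInfo, result); err != nil {
                return nil, err
            }
        }
        return result, nil
    }
}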
