Skip to content

Commit ce80aea

Browse files
authored
Merge pull request kubernetes#78319 from egernst/scheduler-changes
Scheduler changes to introduce alpha support for Pod Overhead
2 parents 5de4134 + 9babbf8 commit ce80aea

File tree

6 files changed

+139
-16
lines changed

6 files changed

+139
-16
lines changed

pkg/scheduler/algorithm/predicates/predicates.go

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -726,6 +726,9 @@ func (c *VolumeZoneChecker) predicate(pod *v1.Pod, meta PredicateMetadata, nodeI
726726
// the max in each dimension iteratively. In contrast, we sum the resource vectors for
727727
// regular containers since they run simultaneously.
728728
//
729+
// If Pod Overhead is specified and the feature gate is set, the resources defined for Overhead
730+
// are added to the calculated Resource request sum.
731+
//
729732
// Example:
730733
//
731734
// Pod:
@@ -756,6 +759,11 @@ func GetResourceRequest(pod *v1.Pod) *schedulernodeinfo.Resource {
756759
result.SetMaxResource(container.Resources.Requests)
757760
}
758761

762+
// If Overhead is being utilized, add to the total requests for the pod
763+
if pod.Spec.Overhead != nil && utilfeature.DefaultFeatureGate.Enabled(features.PodOverhead) {
764+
result.Add(pod.Spec.Overhead)
765+
}
766+
759767
return result
760768
}
761769

pkg/scheduler/algorithm/predicates/predicates_test.go

Lines changed: 36 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -29,7 +29,10 @@ import (
2929
"k8s.io/apimachinery/pkg/api/resource"
3030
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
3131
"k8s.io/apimachinery/pkg/util/sets"
32+
utilfeature "k8s.io/apiserver/pkg/util/feature"
33+
featuregatetesting "k8s.io/component-base/featuregate/testing"
3234
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
35+
"k8s.io/kubernetes/pkg/features"
3336
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
3437
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
3538
schedulertesting "k8s.io/kubernetes/pkg/scheduler/testing"
@@ -86,12 +89,20 @@ func newResourceInitPod(pod *v1.Pod, usage ...schedulernodeinfo.Resource) *v1.Po
8689
return pod
8790
}
8891

92+
func newResourceOverheadPod(pod *v1.Pod, overhead v1.ResourceList) *v1.Pod {
93+
pod.Spec.Overhead = overhead
94+
return pod
95+
}
96+
8997
func GetPredicateMetadata(p *v1.Pod, nodeInfo map[string]*schedulernodeinfo.NodeInfo) PredicateMetadata {
9098
pm := PredicateMetadataFactory{schedulertesting.FakePodLister{p}}
9199
return pm.GetMetadata(p, nodeInfo)
92100
}
93101

94102
func TestPodFitsResources(t *testing.T) {
103+
104+
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodOverhead, true)()
105+
95106
enoughPodsTests := []struct {
96107
pod *v1.Pod
97108
nodeInfo *schedulernodeinfo.NodeInfo
@@ -351,6 +362,31 @@ func TestPodFitsResources(t *testing.T) {
351362
ignoredExtendedResources: sets.NewString(string(extendedResourceB)),
352363
name: "skip checking ignored extended resource",
353364
},
365+
{
366+
pod: newResourceOverheadPod(
367+
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}),
368+
v1.ResourceList{v1.ResourceCPU: resource.MustParse("3m"), v1.ResourceMemory: resource.MustParse("13")},
369+
),
370+
nodeInfo: schedulernodeinfo.NewNodeInfo(
371+
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 5})),
372+
fits: true,
373+
ignoredExtendedResources: sets.NewString(string(extendedResourceB)),
374+
name: "resources + pod overhead fits",
375+
},
376+
{
377+
pod: newResourceOverheadPod(
378+
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}),
379+
v1.ResourceList{v1.ResourceCPU: resource.MustParse("1m"), v1.ResourceMemory: resource.MustParse("15")},
380+
),
381+
nodeInfo: schedulernodeinfo.NewNodeInfo(
382+
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 5})),
383+
fits: false,
384+
ignoredExtendedResources: sets.NewString(string(extendedResourceB)),
385+
name: "requests + overhead does not fit for memory",
386+
reasons: []PredicateFailureReason{
387+
NewInsufficientResourceError(v1.ResourceMemory, 16, 5, 20),
388+
},
389+
},
354390
}
355391

356392
for _, test := range enoughPodsTests {

pkg/scheduler/algorithm/priorities/resource_allocation.go

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -91,6 +91,8 @@ func (r *ResourceAllocationPriority) PriorityMap(
9191
}, nil
9292
}
9393

94+
// getNonZeroRequests returns the total non-zero requests. If Overhead is defined for the pod and the
95+
// PodOverhead feature is enabled, the Overhead is added to the result.
9496
func getNonZeroRequests(pod *v1.Pod) *schedulernodeinfo.Resource {
9597
result := &schedulernodeinfo.Resource{}
9698
for i := range pod.Spec.Containers {
@@ -99,5 +101,17 @@ func getNonZeroRequests(pod *v1.Pod) *schedulernodeinfo.Resource {
99101
result.MilliCPU += cpu
100102
result.Memory += memory
101103
}
104+
105+
// If Overhead is being utilized, add to the total requests for the pod
106+
if pod.Spec.Overhead != nil && utilfeature.DefaultFeatureGate.Enabled(features.PodOverhead) {
107+
if _, found := pod.Spec.Overhead[v1.ResourceCPU]; found {
108+
result.MilliCPU += pod.Spec.Overhead.Cpu().MilliValue()
109+
}
110+
111+
if _, found := pod.Spec.Overhead[v1.ResourceMemory]; found {
112+
result.Memory += pod.Spec.Overhead.Memory().Value()
113+
}
114+
}
115+
102116
return result
103117
}

pkg/scheduler/nodeinfo/BUILD

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -11,12 +11,14 @@ go_library(
1111
visibility = ["//visibility:public"],
1212
deps = [
1313
"//pkg/apis/core/v1/helper:go_default_library",
14+
"//pkg/features:go_default_library",
1415
"//pkg/scheduler/algorithm/priorities/util:go_default_library",
1516
"//pkg/volume/util:go_default_library",
1617
"//staging/src/k8s.io/api/core/v1:go_default_library",
1718
"//staging/src/k8s.io/api/storage/v1beta1:go_default_library",
1819
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
1920
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
21+
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
2022
"//vendor/k8s.io/klog:go_default_library",
2123
],
2224
)
@@ -30,11 +32,14 @@ go_test(
3032
],
3133
embed = [":go_default_library"],
3234
deps = [
35+
"//pkg/features:go_default_library",
3336
"//staging/src/k8s.io/api/core/v1:go_default_library",
3437
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
3538
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
3639
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
3740
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
41+
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
42+
"//staging/src/k8s.io/component-base/featuregate/testing:go_default_library",
3843
],
3944
)
4045

pkg/scheduler/nodeinfo/node_info.go

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -25,8 +25,10 @@ import (
2525
v1 "k8s.io/api/core/v1"
2626
storagev1beta1 "k8s.io/api/storage/v1beta1"
2727
"k8s.io/apimachinery/pkg/api/resource"
28+
utilfeature "k8s.io/apiserver/pkg/util/feature"
2829
"k8s.io/klog"
2930
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
31+
"k8s.io/kubernetes/pkg/features"
3032
priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util"
3133
volumeutil "k8s.io/kubernetes/pkg/volume/util"
3234
)
@@ -613,6 +615,19 @@ func calculateResource(pod *v1.Pod) (res Resource, non0CPU int64, non0Mem int64)
613615
// No non-zero resources for GPUs or opaque resources.
614616
}
615617

618+
// If Overhead is being utilized, add to the total requests for the pod
619+
if pod.Spec.Overhead != nil && utilfeature.DefaultFeatureGate.Enabled(features.PodOverhead) {
620+
resPtr.Add(pod.Spec.Overhead)
621+
622+
if _, found := pod.Spec.Overhead[v1.ResourceCPU]; found {
623+
non0CPU += pod.Spec.Overhead.Cpu().MilliValue()
624+
}
625+
626+
if _, found := pod.Spec.Overhead[v1.ResourceMemory]; found {
627+
non0Mem += pod.Spec.Overhead.Memory().Value()
628+
}
629+
}
630+
616631
return
617632
}
618633

pkg/scheduler/nodeinfo/node_info_test.go

Lines changed: 61 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -26,6 +26,9 @@ import (
2626
"k8s.io/apimachinery/pkg/api/resource"
2727
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
2828
"k8s.io/apimachinery/pkg/types"
29+
utilfeature "k8s.io/apiserver/pkg/util/feature"
30+
featuregatetesting "k8s.io/component-base/featuregate/testing"
31+
"k8s.io/kubernetes/pkg/features"
2932
)
3033

3134
func TestNewResource(t *testing.T) {
@@ -540,6 +543,9 @@ func TestNodeInfoClone(t *testing.T) {
540543
}
541544

542545
func TestNodeInfoAddPod(t *testing.T) {
546+
547+
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodOverhead, true)()
548+
543549
nodeName := "test-node"
544550
pods := []*v1.Pod{
545551
{
@@ -567,6 +573,9 @@ func TestNodeInfoAddPod(t *testing.T) {
567573
},
568574
},
569575
NodeName: nodeName,
576+
Overhead: v1.ResourceList{
577+
v1.ResourceCPU: resource.MustParse("500m"),
578+
},
570579
},
571580
},
572581
{
@@ -580,8 +589,7 @@ func TestNodeInfoAddPod(t *testing.T) {
580589
{
581590
Resources: v1.ResourceRequirements{
582591
Requests: v1.ResourceList{
583-
v1.ResourceCPU: resource.MustParse("200m"),
584-
v1.ResourceMemory: resource.MustParse("1Ki"),
592+
v1.ResourceCPU: resource.MustParse("200m"),
585593
},
586594
},
587595
Ports: []v1.ContainerPort{
@@ -594,6 +602,10 @@ func TestNodeInfoAddPod(t *testing.T) {
594602
},
595603
},
596604
NodeName: nodeName,
605+
Overhead: v1.ResourceList{
606+
v1.ResourceCPU: resource.MustParse("500m"),
607+
v1.ResourceMemory: resource.MustParse("500"),
608+
},
597609
},
598610
},
599611
}
@@ -604,15 +616,15 @@ func TestNodeInfoAddPod(t *testing.T) {
604616
},
605617
},
606618
requestedResource: &Resource{
607-
MilliCPU: 300,
608-
Memory: 1524,
619+
MilliCPU: 1300,
620+
Memory: 1000,
609621
EphemeralStorage: 0,
610622
AllowedPodNumber: 0,
611623
ScalarResources: map[v1.ResourceName]int64(nil),
612624
},
613625
nonzeroRequest: &Resource{
614-
MilliCPU: 300,
615-
Memory: 1524,
626+
MilliCPU: 1300,
627+
Memory: 209716200, // 200MB + 1000 specified in requests/overhead
616628
EphemeralStorage: 0,
617629
AllowedPodNumber: 0,
618630
ScalarResources: map[v1.ResourceName]int64(nil),
@@ -653,6 +665,9 @@ func TestNodeInfoAddPod(t *testing.T) {
653665
},
654666
},
655667
NodeName: nodeName,
668+
Overhead: v1.ResourceList{
669+
v1.ResourceCPU: resource.MustParse("500m"),
670+
},
656671
},
657672
},
658673
{
@@ -666,8 +681,7 @@ func TestNodeInfoAddPod(t *testing.T) {
666681
{
667682
Resources: v1.ResourceRequirements{
668683
Requests: v1.ResourceList{
669-
v1.ResourceCPU: resource.MustParse("200m"),
670-
v1.ResourceMemory: resource.MustParse("1Ki"),
684+
v1.ResourceCPU: resource.MustParse("200m"),
671685
},
672686
},
673687
Ports: []v1.ContainerPort{
@@ -680,6 +694,10 @@ func TestNodeInfoAddPod(t *testing.T) {
680694
},
681695
},
682696
NodeName: nodeName,
697+
Overhead: v1.ResourceList{
698+
v1.ResourceCPU: resource.MustParse("500m"),
699+
v1.ResourceMemory: resource.MustParse("500"),
700+
},
683701
},
684702
},
685703
},
@@ -702,12 +720,23 @@ func TestNodeInfoAddPod(t *testing.T) {
702720
}
703721

704722
func TestNodeInfoRemovePod(t *testing.T) {
723+
724+
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodOverhead, true)()
725+
705726
nodeName := "test-node"
706727
pods := []*v1.Pod{
707728
makeBasePod(t, nodeName, "test-1", "100m", "500", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 80, Protocol: "TCP"}}),
708729
makeBasePod(t, nodeName, "test-2", "200m", "1Ki", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 8080, Protocol: "TCP"}}),
709730
}
710731

732+
// add pod Overhead
733+
for _, pod := range pods {
734+
pod.Spec.Overhead = v1.ResourceList{
735+
v1.ResourceCPU: resource.MustParse("500m"),
736+
v1.ResourceMemory: resource.MustParse("500"),
737+
}
738+
}
739+
711740
tests := []struct {
712741
pod *v1.Pod
713742
errExpected bool
@@ -723,15 +752,15 @@ func TestNodeInfoRemovePod(t *testing.T) {
723752
},
724753
},
725754
requestedResource: &Resource{
726-
MilliCPU: 300,
727-
Memory: 1524,
755+
MilliCPU: 1300,
756+
Memory: 2524,
728757
EphemeralStorage: 0,
729758
AllowedPodNumber: 0,
730759
ScalarResources: map[v1.ResourceName]int64(nil),
731760
},
732761
nonzeroRequest: &Resource{
733-
MilliCPU: 300,
734-
Memory: 1524,
762+
MilliCPU: 1300,
763+
Memory: 2524,
735764
EphemeralStorage: 0,
736765
AllowedPodNumber: 0,
737766
ScalarResources: map[v1.ResourceName]int64(nil),
@@ -772,6 +801,10 @@ func TestNodeInfoRemovePod(t *testing.T) {
772801
},
773802
},
774803
NodeName: nodeName,
804+
Overhead: v1.ResourceList{
805+
v1.ResourceCPU: resource.MustParse("500m"),
806+
v1.ResourceMemory: resource.MustParse("500"),
807+
},
775808
},
776809
},
777810
{
@@ -799,6 +832,10 @@ func TestNodeInfoRemovePod(t *testing.T) {
799832
},
800833
},
801834
NodeName: nodeName,
835+
Overhead: v1.ResourceList{
836+
v1.ResourceCPU: resource.MustParse("500m"),
837+
v1.ResourceMemory: resource.MustParse("500"),
838+
},
802839
},
803840
},
804841
},
@@ -830,6 +867,10 @@ func TestNodeInfoRemovePod(t *testing.T) {
830867
},
831868
},
832869
NodeName: nodeName,
870+
Overhead: v1.ResourceList{
871+
v1.ResourceCPU: resource.MustParse("500m"),
872+
v1.ResourceMemory: resource.MustParse("500"),
873+
},
833874
},
834875
},
835876
errExpected: false,
@@ -840,15 +881,15 @@ func TestNodeInfoRemovePod(t *testing.T) {
840881
},
841882
},
842883
requestedResource: &Resource{
843-
MilliCPU: 200,
844-
Memory: 1024,
884+
MilliCPU: 700,
885+
Memory: 1524,
845886
EphemeralStorage: 0,
846887
AllowedPodNumber: 0,
847888
ScalarResources: map[v1.ResourceName]int64(nil),
848889
},
849890
nonzeroRequest: &Resource{
850-
MilliCPU: 200,
851-
Memory: 1024,
891+
MilliCPU: 700,
892+
Memory: 1524,
852893
EphemeralStorage: 0,
853894
AllowedPodNumber: 0,
854895
ScalarResources: map[v1.ResourceName]int64(nil),
@@ -888,6 +929,10 @@ func TestNodeInfoRemovePod(t *testing.T) {
888929
},
889930
},
890931
NodeName: nodeName,
932+
Overhead: v1.ResourceList{
933+
v1.ResourceCPU: resource.MustParse("500m"),
934+
v1.ResourceMemory: resource.MustParse("500"),
935+
},
891936
},
892937
},
893938
},

0 commit comments

Comments
 (0)