
Commit b850b5c

Merge pull request kubernetes#89222 from fengzixu/master
bugfix: initcontainer is also taken into account when calculating resource requests
2 parents: 29e4e66 + d54822d
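
Init containers run to completion one at a time before any regular container starts, so a node must be able to accommodate the largest init container as well as the sum of the regular containers. This commit makes the scheduler's accounting follow the rule podResourceRequest = max(sum(podSpec.Containers), max(podSpec.InitContainers)) + overhead. Below is a minimal standalone sketch of that arithmetic; the function name and the plain int64 milliCPU values are illustrative, not the scheduler's own code.

package main

import "fmt"

// effectiveRequest mirrors the rule documented in this commit:
// podResourceRequest = max(sum(containers), max(initContainers)) + overhead.
// Values are milliCPU for concreteness; the same rule applies per resource.
func effectiveRequest(containers, initContainers []int64, overhead int64) int64 {
    var sum int64
    for _, c := range containers {
        sum += c
    }
    req := sum
    for _, ic := range initContainers {
        if ic > req {
            req = ic
        }
    }
    return req + overhead
}

func main() {
    // One 200m container, one 500m init container, 500m overhead
    // (the same shape as the pod added to the tests below).
    fmt.Println(effectiveRequest([]int64{200}, []int64{500}, 500)) // 1000
}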

5 files changed: +122 -7 lines


pkg/scheduler/framework/plugins/noderesources/resource_allocation.go

Lines changed: 10 additions & 0 deletions
@@ -114,6 +114,7 @@ func calculateResourceAllocatableRequest(nodeInfo *framework.NodeInfo, pod *v1.P
 
 // calculatePodResourceRequest returns the total non-zero requests. If Overhead is defined for the pod and the
 // PodOverhead feature is enabled, the Overhead is added to the result.
+// podResourceRequest = max(sum(podSpec.Containers), podSpec.InitContainers) + overHead
 func calculatePodResourceRequest(pod *v1.Pod, resource v1.ResourceName) int64 {
     var podRequest int64
     for i := range pod.Spec.Containers {
@@ -122,11 +123,20 @@ func calculatePodResourceRequest(pod *v1.Pod, resource v1.ResourceName) int64 {
         podRequest += value
     }
 
+    for i := range pod.Spec.InitContainers {
+        initContainer := &pod.Spec.InitContainers[i]
+        value := schedutil.GetNonzeroRequestForResource(resource, &initContainer.Resources.Requests)
+        if podRequest < value {
+            podRequest = value
+        }
+    }
+
     // If Overhead is being utilized, add to the total requests for the pod
     if pod.Spec.Overhead != nil && utilfeature.DefaultFeatureGate.Enabled(features.PodOverhead) {
         if quantity, found := pod.Spec.Overhead[resource]; found {
             podRequest += quantity.Value()
         }
     }
+
     return podRequest
 }
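
To see the new loop's effect in isolation, a hypothetical in-package test (not part of this commit) could assert that a 500m init container overrides the 200m sum of the regular containers; it assumes it sits in the noderesources package so that the unexported function is reachable.

package noderesources

import (
    "testing"

    v1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/resource"
)

func TestInitContainerDominatesCPURequest(t *testing.T) {
    pod := &v1.Pod{
        Spec: v1.PodSpec{
            Containers: []v1.Container{{
                Resources: v1.ResourceRequirements{Requests: v1.ResourceList{
                    v1.ResourceCPU: resource.MustParse("200m"),
                }},
            }},
            InitContainers: []v1.Container{{
                Resources: v1.ResourceRequirements{Requests: v1.ResourceList{
                    v1.ResourceCPU: resource.MustParse("500m"),
                }},
            }},
        },
    }
    // Before this change the init container was ignored and the result was 200;
    // with the new loop the pod requests max(200, 500) = 500 milliCPU.
    if got := calculatePodResourceRequest(pod, v1.ResourceCPU); got != 500 {
        t.Errorf("expected 500 milliCPU, got %d", got)
    }
}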

pkg/scheduler/framework/v1alpha1/types.go

Lines changed: 17 additions & 3 deletions
@@ -208,7 +208,10 @@ func (r *Resource) Add(rl v1.ResourceList) {
         case v1.ResourcePods:
             r.AllowedPodNumber += int(rQuant.Value())
         case v1.ResourceEphemeralStorage:
-            r.EphemeralStorage += rQuant.Value()
+            if utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolation) {
+                // if the local storage capacity isolation feature gate is disabled, pods request 0 disk.
+                r.EphemeralStorage += rQuant.Value()
+            }
         default:
             if v1helper.IsScalarResourceName(rName) {
                 r.AddScalar(rName, rQuant.Value())
@@ -458,21 +461,32 @@ func (n *NodeInfo) resetSlicesIfEmpty() {
     }
 }
 
+// resourceRequest = max(sum(podSpec.Containers), podSpec.InitContainers) + overHead
 func calculateResource(pod *v1.Pod) (res Resource, non0CPU int64, non0Mem int64) {
     resPtr := &res
     for _, c := range pod.Spec.Containers {
         resPtr.Add(c.Resources.Requests)
-
         non0CPUReq, non0MemReq := schedutil.GetNonzeroRequests(&c.Resources.Requests)
         non0CPU += non0CPUReq
         non0Mem += non0MemReq
         // No non-zero resources for GPUs or opaque resources.
     }
 
+    for _, ic := range pod.Spec.InitContainers {
+        resPtr.SetMaxResource(ic.Resources.Requests)
+        non0CPUReq, non0MemReq := schedutil.GetNonzeroRequests(&ic.Resources.Requests)
+        if non0CPU < non0CPUReq {
+            non0CPU = non0CPUReq
+        }
+
+        if non0Mem < non0MemReq {
+            non0Mem = non0MemReq
+        }
+    }
+
     // If Overhead is being utilized, add to the total requests for the pod
     if pod.Spec.Overhead != nil && utilfeature.DefaultFeatureGate.Enabled(features.PodOverhead) {
         resPtr.Add(pod.Spec.Overhead)
-
         if _, found := pod.Spec.Overhead[v1.ResourceCPU]; found {
             non0CPU += pod.Spec.Overhead.Cpu().MilliValue()
         }
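
The other change in this file gates ephemeral-storage accounting in Resource.Add on the LocalStorageCapacityIsolation feature gate. A small usage sketch follows, assuming the framework v1alpha1 package is importable as shown and that the gate is enabled, which has been its default since it went beta.

package main

import (
    "fmt"

    v1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/resource"
    framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

func main() {
    r := &framework.Resource{}
    r.Add(v1.ResourceList{
        v1.ResourceEphemeralStorage: resource.MustParse("1Gi"),
    })
    // With LocalStorageCapacityIsolation enabled this prints 1073741824;
    // with the gate disabled, Add now skips ephemeral storage and it stays 0.
    fmt.Println(r.EphemeralStorage)
}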

pkg/scheduler/framework/v1alpha1/types_test.go

Lines changed: 86 additions & 4 deletions
@@ -614,6 +614,46 @@ func TestNodeInfoAddPod(t *testing.T) {
                 },
             },
         },
+        {
+            ObjectMeta: metav1.ObjectMeta{
+                Namespace: "node_info_cache_test",
+                Name:      "test-3",
+                UID:       types.UID("test-3"),
+            },
+            Spec: v1.PodSpec{
+                Containers: []v1.Container{
+                    {
+                        Resources: v1.ResourceRequirements{
+                            Requests: v1.ResourceList{
+                                v1.ResourceCPU: resource.MustParse("200m"),
+                            },
+                        },
+                        Ports: []v1.ContainerPort{
+                            {
+                                HostIP:   "127.0.0.1",
+                                HostPort: 8080,
+                                Protocol: "TCP",
+                            },
+                        },
+                    },
+                },
+                InitContainers: []v1.Container{
+                    {
+                        Resources: v1.ResourceRequirements{
+                            Requests: v1.ResourceList{
+                                v1.ResourceCPU:    resource.MustParse("500m"),
+                                v1.ResourceMemory: resource.MustParse("200Mi"),
+                            },
+                        },
+                    },
+                },
+                NodeName: nodeName,
+                Overhead: v1.ResourceList{
+                    v1.ResourceCPU:    resource.MustParse("500m"),
+                    v1.ResourceMemory: resource.MustParse("500"),
+                },
+            },
+        },
     }
     expected := &NodeInfo{
         node: &v1.Node{
@@ -622,15 +662,15 @@ func TestNodeInfoAddPod(t *testing.T) {
            },
        },
        Requested: &Resource{
-            MilliCPU:         1300,
-            Memory:           1000,
+            MilliCPU:         2300,
+            Memory:           209716700, //1500 + 200MB in initContainers
            EphemeralStorage: 0,
            AllowedPodNumber: 0,
            ScalarResources:  map[v1.ResourceName]int64(nil),
        },
        NonZeroRequested: &Resource{
-            MilliCPU:         1300,
-            Memory:           209716200, //200MB + 1000 specified in requests/overhead
+            MilliCPU:         2300,
+            Memory:           419431900, //200MB(initContainers) + 200MB(default memory value) + 1500 specified in requests/overhead
            EphemeralStorage: 0,
            AllowedPodNumber: 0,
            ScalarResources:  map[v1.ResourceName]int64(nil),
@@ -710,6 +750,48 @@ func TestNodeInfoAddPod(t *testing.T) {
                    },
                },
            },
+            {
+                Pod: &v1.Pod{
+                    ObjectMeta: metav1.ObjectMeta{
+                        Namespace: "node_info_cache_test",
+                        Name:      "test-3",
+                        UID:       types.UID("test-3"),
+                    },
+                    Spec: v1.PodSpec{
+                        Containers: []v1.Container{
+                            {
+                                Resources: v1.ResourceRequirements{
+                                    Requests: v1.ResourceList{
+                                        v1.ResourceCPU: resource.MustParse("200m"),
+                                    },
+                                },
+                                Ports: []v1.ContainerPort{
+                                    {
+                                        HostIP:   "127.0.0.1",
+                                        HostPort: 8080,
+                                        Protocol: "TCP",
+                                    },
+                                },
+                            },
+                        },
+                        InitContainers: []v1.Container{
+                            {
+                                Resources: v1.ResourceRequirements{
+                                    Requests: v1.ResourceList{
+                                        v1.ResourceCPU:    resource.MustParse("500m"),
+                                        v1.ResourceMemory: resource.MustParse("200Mi"),
+                                    },
+                                },
+                            },
+                        },
+                        NodeName: nodeName,
+                        Overhead: v1.ResourceList{
+                            v1.ResourceCPU:    resource.MustParse("500m"),
+                            v1.ResourceMemory: resource.MustParse("500"),
+                        },
+                    },
+                },
+            },
        },
    }
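
The updated expectations follow directly from the new rule. The added test-3 pod requests 200m CPU in its regular container, 500m CPU plus 200Mi memory in its init container, and carries 500m CPU and 500 bytes of memory as overhead. Its contribution to Requested is therefore max(200m, 500m) + 500m = 1000 milliCPU and max(0, 200Mi) + 500 = 209715700 bytes, lifting the expected totals from 1300 milliCPU / 1000 bytes to 2300 / 209716700. For NonZeroRequested, the memory-less regular container counts at the scheduler's 200MB default (209715200 bytes) and the init container at 200Mi (the same value), so the pod again adds 209715700 bytes: 209716200 + 209715700 = 419431900.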

pkg/scheduler/util/BUILD

Lines changed: 2 additions & 0 deletions
@@ -38,10 +38,12 @@ go_library(
     deps = [
         "//pkg/api/v1/pod:go_default_library",
         "//pkg/apis/core/v1/helper:go_default_library",
+        "//pkg/features:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
+        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
         "//staging/src/k8s.io/kube-scheduler/extender/v1:go_default_library",
         "//vendor/k8s.io/klog:go_default_library",
     ],

pkg/scheduler/util/non_zero.go

Lines changed: 7 additions & 0 deletions
@@ -18,7 +18,9 @@ package util
 
 import (
     v1 "k8s.io/api/core/v1"
+    utilfeature "k8s.io/apiserver/pkg/util/feature"
     v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
+    "k8s.io/kubernetes/pkg/features"
 )
 
 // For each of these resources, a pod that doesn't request the resource explicitly
@@ -60,6 +62,11 @@ func GetNonzeroRequestForResource(resource v1.ResourceName, requests *v1.Resourc
         }
         return requests.Memory().Value()
     case v1.ResourceEphemeralStorage:
+        // if the local storage capacity isolation feature gate is disabled, pods request 0 disk.
+        if !utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolation) {
+            return 0
+        }
+
         quantity, found := (*requests)[v1.ResourceEphemeralStorage]
         if !found {
             return 0