
Commit 476c1c7 (1 parent: 9d2d37b)
Author: Eric Ernst

kube-eviction: use common resource summation functions

Utilize the resource helpers' GetResourceRequestQuantity instead of duplicating the logic here.

Signed-off-by: Eric Ernst <[email protected]>
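For reviewers: the shared helper is expected to reproduce the rule documented on the podRequest function removed below, i.e. max(max of init container requests, sum of container requests). A minimal standalone sketch of that behavior (the pod shape and quantities here are illustrative, not part of this commit):

// Sketch: pod-level memory request as computed by the shared helper,
// assuming it preserves podRequest's documented rule of
// max(max of init container requests, sum of container requests).
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	v1resource "k8s.io/kubernetes/pkg/api/v1/resource"
)

func main() {
	pod := &v1.Pod{
		Spec: v1.PodSpec{
			InitContainers: []v1.Container{{
				Name: "init",
				Resources: v1.ResourceRequirements{Requests: v1.ResourceList{
					v1.ResourceMemory: resource.MustParse("256Mi"),
				}},
			}},
			Containers: []v1.Container{
				{
					Name: "app",
					Resources: v1.ResourceRequirements{Requests: v1.ResourceList{
						v1.ResourceMemory: resource.MustParse("128Mi"),
					}},
				},
				{
					Name: "sidecar",
					Resources: v1.ResourceRequirements{Requests: v1.ResourceList{
						v1.ResourceMemory: resource.MustParse("64Mi"),
					}},
				},
			},
		},
	}

	// Sum of container requests is 192Mi; the single init container asks for
	// 256Mi, so the expected pod-level memory request is 256Mi.
	req := v1resource.GetResourceRequestQuantity(pod, v1.ResourceMemory)
	fmt.Println(req.String()) // 256Mi
}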

1 file changed: pkg/kubelet/eviction/helpers.go (9 additions, 45 deletions)
diff --git a/pkg/kubelet/eviction/helpers.go b/pkg/kubelet/eviction/helpers.go
--- a/pkg/kubelet/eviction/helpers.go
+++ b/pkg/kubelet/eviction/helpers.go
@@ -25,9 +25,8 @@ import (
 
 	"k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
-	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/klog"
-	"k8s.io/kubernetes/pkg/features"
+	v1resource "k8s.io/kubernetes/pkg/api/v1/resource"
 	statsapi "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
 	evictionapi "k8s.io/kubernetes/pkg/kubelet/eviction/api"
 	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
@@ -536,8 +535,8 @@ func exceedMemoryRequests(stats statsFunc) cmpFunc {
 
 		p1Memory := memoryUsage(p1Stats.Memory)
 		p2Memory := memoryUsage(p2Stats.Memory)
-		p1ExceedsRequests := p1Memory.Cmp(podRequest(p1, v1.ResourceMemory)) == 1
-		p2ExceedsRequests := p2Memory.Cmp(podRequest(p2, v1.ResourceMemory)) == 1
+		p1ExceedsRequests := p1Memory.Cmp(v1resource.GetResourceRequestQuantity(p1, v1.ResourceMemory)) == 1
+		p2ExceedsRequests := p2Memory.Cmp(v1resource.GetResourceRequestQuantity(p2, v1.ResourceMemory)) == 1
 		// prioritize evicting the pod which exceeds its requests
 		return cmpBool(p1ExceedsRequests, p2ExceedsRequests)
 	}
@@ -555,53 +554,18 @@ func memory(stats statsFunc) cmpFunc {
 
 		// adjust p1, p2 usage relative to the request (if any)
 		p1Memory := memoryUsage(p1Stats.Memory)
-		p1Request := podRequest(p1, v1.ResourceMemory)
+		p1Request := v1resource.GetResourceRequestQuantity(p1, v1.ResourceMemory)
 		p1Memory.Sub(p1Request)
 
 		p2Memory := memoryUsage(p2Stats.Memory)
-		p2Request := podRequest(p2, v1.ResourceMemory)
+		p2Request := v1resource.GetResourceRequestQuantity(p2, v1.ResourceMemory)
 		p2Memory.Sub(p2Request)
 
 		// prioritize evicting the pod which has the larger consumption of memory
 		return p2Memory.Cmp(*p1Memory)
 	}
 }
 
-// podRequest returns the total resource request of a pod which is the
-// max(max of init container requests, sum of container requests)
-func podRequest(pod *v1.Pod, resourceName v1.ResourceName) resource.Quantity {
-	containerValue := resource.Quantity{Format: resource.BinarySI}
-	if resourceName == v1.ResourceEphemeralStorage && !utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolation) {
-		// if the local storage capacity isolation feature gate is disabled, pods request 0 disk
-		return containerValue
-	}
-	for i := range pod.Spec.Containers {
-		switch resourceName {
-		case v1.ResourceMemory:
-			containerValue.Add(*pod.Spec.Containers[i].Resources.Requests.Memory())
-		case v1.ResourceEphemeralStorage:
-			containerValue.Add(*pod.Spec.Containers[i].Resources.Requests.StorageEphemeral())
-		}
-	}
-	initValue := resource.Quantity{Format: resource.BinarySI}
-	for i := range pod.Spec.InitContainers {
-		switch resourceName {
-		case v1.ResourceMemory:
-			if initValue.Cmp(*pod.Spec.InitContainers[i].Resources.Requests.Memory()) < 0 {
-				initValue = *pod.Spec.InitContainers[i].Resources.Requests.Memory()
-			}
-		case v1.ResourceEphemeralStorage:
-			if initValue.Cmp(*pod.Spec.InitContainers[i].Resources.Requests.StorageEphemeral()) < 0 {
-				initValue = *pod.Spec.InitContainers[i].Resources.Requests.StorageEphemeral()
-			}
-		}
-	}
-	if containerValue.Cmp(initValue) > 0 {
-		return containerValue
-	}
-	return initValue
-}
-
 // exceedDiskRequests compares whether or not pods' disk usage exceeds their requests
 func exceedDiskRequests(stats statsFunc, fsStatsToMeasure []fsStatsType, diskResource v1.ResourceName) cmpFunc {
 	return func(p1, p2 *v1.Pod) int {
@@ -621,8 +585,8 @@ func exceedDiskRequests(stats statsFunc, fsStatsToMeasure []fsStatsType, diskRes
 
 		p1Disk := p1Usage[diskResource]
 		p2Disk := p2Usage[diskResource]
-		p1ExceedsRequests := p1Disk.Cmp(podRequest(p1, diskResource)) == 1
-		p2ExceedsRequests := p2Disk.Cmp(podRequest(p2, diskResource)) == 1
+		p1ExceedsRequests := p1Disk.Cmp(v1resource.GetResourceRequestQuantity(p1, diskResource)) == 1
+		p2ExceedsRequests := p2Disk.Cmp(v1resource.GetResourceRequestQuantity(p2, diskResource)) == 1
 		// prioritize evicting the pod which exceeds its requests
 		return cmpBool(p1ExceedsRequests, p2ExceedsRequests)
 	}
@@ -647,9 +611,9 @@ func disk(stats statsFunc, fsStatsToMeasure []fsStatsType, diskResource v1.Resou
 		// adjust p1, p2 usage relative to the request (if any)
 		p1Disk := p1Usage[diskResource]
 		p2Disk := p2Usage[diskResource]
-		p1Request := podRequest(p1, v1.ResourceEphemeralStorage)
+		p1Request := v1resource.GetResourceRequestQuantity(p1, v1.ResourceEphemeralStorage)
 		p1Disk.Sub(p1Request)
-		p2Request := podRequest(p2, v1.ResourceEphemeralStorage)
+		p2Request := v1resource.GetResourceRequestQuantity(p2, v1.ResourceEphemeralStorage)
 		p2Disk.Sub(p2Request)
 		// prioritize evicting the pod which has the larger consumption of disk
 		return p2Disk.Cmp(p1Disk)
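
As background on the ranking in the memory() and disk() comparators above: each pod's request is subtracted from its usage, and the pod with the larger remainder sorts first. A standalone sketch of that resource.Quantity arithmetic (the values are made up for illustration):

// Illustrates the Quantity arithmetic used by the comparators in this diff:
// usage minus request, then the pod with the larger overage ranks first
// for eviction.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	p1Usage := resource.MustParse("300Mi")
	p1Request := resource.MustParse("256Mi")
	p1Usage.Sub(p1Request) // p1 is 44Mi over its request

	p2Usage := resource.MustParse("500Mi")
	p2Request := resource.MustParse("128Mi")
	p2Usage.Sub(p2Request) // p2 is 372Mi over its request

	// Mirrors `return p2Memory.Cmp(*p1Memory)`: a positive result means p2
	// exceeds its request by more than p1, so p2 is evicted first.
	fmt.Println(p2Usage.Cmp(p1Usage)) // 1
}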
