@@ -25,9 +25,8 @@ import (

	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
-	utilfeature "k8s.io/apiserver/pkg/util/feature"
	"k8s.io/klog"
-	"k8s.io/kubernetes/pkg/features"
+	v1resource "k8s.io/kubernetes/pkg/api/v1/resource"
	statsapi "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
	evictionapi "k8s.io/kubernetes/pkg/kubelet/eviction/api"
	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
@@ -536,8 +535,8 @@ func exceedMemoryRequests(stats statsFunc) cmpFunc {

		p1Memory := memoryUsage(p1Stats.Memory)
		p2Memory := memoryUsage(p2Stats.Memory)
-		p1ExceedsRequests := p1Memory.Cmp(podRequest(p1, v1.ResourceMemory)) == 1
-		p2ExceedsRequests := p2Memory.Cmp(podRequest(p2, v1.ResourceMemory)) == 1
+		p1ExceedsRequests := p1Memory.Cmp(v1resource.GetResourceRequestQuantity(p1, v1.ResourceMemory)) == 1
+		p2ExceedsRequests := p2Memory.Cmp(v1resource.GetResourceRequestQuantity(p2, v1.ResourceMemory)) == 1
		// prioritize evicting the pod which exceeds its requests
		return cmpBool(p1ExceedsRequests, p2ExceedsRequests)
	}
@@ -555,53 +554,18 @@ func memory(stats statsFunc) cmpFunc {

		// adjust p1, p2 usage relative to the request (if any)
		p1Memory := memoryUsage(p1Stats.Memory)
-		p1Request := podRequest(p1, v1.ResourceMemory)
+		p1Request := v1resource.GetResourceRequestQuantity(p1, v1.ResourceMemory)
		p1Memory.Sub(p1Request)

		p2Memory := memoryUsage(p2Stats.Memory)
-		p2Request := podRequest(p2, v1.ResourceMemory)
+		p2Request := v1resource.GetResourceRequestQuantity(p2, v1.ResourceMemory)
		p2Memory.Sub(p2Request)

		// prioritize evicting the pod which has the larger consumption of memory
		return p2Memory.Cmp(*p1Memory)
	}
}

-// podRequest returns the total resource request of a pod which is the
-// max(max of init container requests, sum of container requests)
-func podRequest(pod *v1.Pod, resourceName v1.ResourceName) resource.Quantity {
-	containerValue := resource.Quantity{Format: resource.BinarySI}
-	if resourceName == v1.ResourceEphemeralStorage && !utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolation) {
-		// if the local storage capacity isolation feature gate is disabled, pods request 0 disk
-		return containerValue
-	}
-	for i := range pod.Spec.Containers {
-		switch resourceName {
-		case v1.ResourceMemory:
-			containerValue.Add(*pod.Spec.Containers[i].Resources.Requests.Memory())
-		case v1.ResourceEphemeralStorage:
-			containerValue.Add(*pod.Spec.Containers[i].Resources.Requests.StorageEphemeral())
-		}
-	}
-	initValue := resource.Quantity{Format: resource.BinarySI}
-	for i := range pod.Spec.InitContainers {
-		switch resourceName {
-		case v1.ResourceMemory:
-			if initValue.Cmp(*pod.Spec.InitContainers[i].Resources.Requests.Memory()) < 0 {
-				initValue = *pod.Spec.InitContainers[i].Resources.Requests.Memory()
-			}
-		case v1.ResourceEphemeralStorage:
-			if initValue.Cmp(*pod.Spec.InitContainers[i].Resources.Requests.StorageEphemeral()) < 0 {
-				initValue = *pod.Spec.InitContainers[i].Resources.Requests.StorageEphemeral()
-			}
-		}
-	}
-	if containerValue.Cmp(initValue) > 0 {
-		return containerValue
-	}
-	return initValue
-}
-
// exceedDiskRequests compares whether or not pods' disk usage exceeds their requests
func exceedDiskRequests(stats statsFunc, fsStatsToMeasure []fsStatsType, diskResource v1.ResourceName) cmpFunc {
	return func(p1, p2 *v1.Pod) int {
@@ -621,8 +585,8 @@ func exceedDiskRequests(stats statsFunc, fsStatsToMeasure []fsStatsType, diskRes

		p1Disk := p1Usage[diskResource]
		p2Disk := p2Usage[diskResource]
-		p1ExceedsRequests := p1Disk.Cmp(podRequest(p1, diskResource)) == 1
-		p2ExceedsRequests := p2Disk.Cmp(podRequest(p2, diskResource)) == 1
+		p1ExceedsRequests := p1Disk.Cmp(v1resource.GetResourceRequestQuantity(p1, diskResource)) == 1
+		p2ExceedsRequests := p2Disk.Cmp(v1resource.GetResourceRequestQuantity(p2, diskResource)) == 1
		// prioritize evicting the pod which exceeds its requests
		return cmpBool(p1ExceedsRequests, p2ExceedsRequests)
	}
@@ -647,9 +611,9 @@ func disk(stats statsFunc, fsStatsToMeasure []fsStatsType, diskResource v1.Resou
		// adjust p1, p2 usage relative to the request (if any)
		p1Disk := p1Usage[diskResource]
		p2Disk := p2Usage[diskResource]
-		p1Request := podRequest(p1, v1.ResourceEphemeralStorage)
+		p1Request := v1resource.GetResourceRequestQuantity(p1, v1.ResourceEphemeralStorage)
		p1Disk.Sub(p1Request)
-		p2Request := podRequest(p2, v1.ResourceEphemeralStorage)
+		p2Request := v1resource.GetResourceRequestQuantity(p2, v1.ResourceEphemeralStorage)
		p2Disk.Sub(p2Request)
		// prioritize evicting the pod which has the larger consumption of disk
		return p2Disk.Cmp(p1Disk)
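
For reference, the aggregation rule documented on the removed podRequest helper (the effective pod request is max(sum of container requests, max of init container requests)) can be exercised with the standalone sketch below. The pod spec and quantities are illustrative only; assuming the shared helper keeps the same rule, a call such as v1resource.GetResourceRequestQuantity(pod, v1.ResourceMemory) would be expected to yield the same result for this pod.

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// effectiveMemoryRequest mirrors the rule from the removed podRequest helper:
// max(sum of container requests, max of init container requests), memory only.
func effectiveMemoryRequest(pod *v1.Pod) resource.Quantity {
	containerValue := resource.Quantity{Format: resource.BinarySI}
	for i := range pod.Spec.Containers {
		containerValue.Add(*pod.Spec.Containers[i].Resources.Requests.Memory())
	}
	initValue := resource.Quantity{Format: resource.BinarySI}
	for i := range pod.Spec.InitContainers {
		if initValue.Cmp(*pod.Spec.InitContainers[i].Resources.Requests.Memory()) < 0 {
			initValue = *pod.Spec.InitContainers[i].Resources.Requests.Memory()
		}
	}
	if containerValue.Cmp(initValue) > 0 {
		return containerValue
	}
	return initValue
}

func main() {
	// Hypothetical pod: two app containers (100Mi + 200Mi) and one init container (250Mi).
	pod := &v1.Pod{
		Spec: v1.PodSpec{
			InitContainers: []v1.Container{
				{Resources: v1.ResourceRequirements{Requests: v1.ResourceList{
					v1.ResourceMemory: resource.MustParse("250Mi"),
				}}},
			},
			Containers: []v1.Container{
				{Resources: v1.ResourceRequirements{Requests: v1.ResourceList{
					v1.ResourceMemory: resource.MustParse("100Mi"),
				}}},
				{Resources: v1.ResourceRequirements{Requests: v1.ResourceList{
					v1.ResourceMemory: resource.MustParse("200Mi"),
				}}},
			},
		},
	}
	req := effectiveMemoryRequest(pod)
	fmt.Println(req.String()) // max(100Mi+200Mi, 250Mi) = 300Mi
}
```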