@@ -2588,6 +2588,8 @@ func TestHandlePodResourcesResize(t *testing.T) {
 	defer testKubelet.Cleanup()
 	kubelet := testKubelet.kubelet
 
+	cpu1m := resource.MustParse("1m")
+	cpu2m := resource.MustParse("2m")
 	cpu500m := resource.MustParse("500m")
 	cpu1000m := resource.MustParse("1")
 	cpu1500m := resource.MustParse("1500m")
@@ -2671,7 +2673,7 @@ func TestHandlePodResourcesResize(t *testing.T) {
 
 	tests := []struct {
 		name                 string
-		pod                  *v1.Pod
+		originalRequests     v1.ResourceList
 		newRequests          v1.ResourceList
 		newRequestsAllocated bool // Whether the new requests have already been allocated (but not actuated)
 		expectedAllocations  v1.ResourceList
@@ -2681,79 +2683,113 @@ func TestHandlePodResourcesResize(t *testing.T) {
 	}{
 		{
 			name:                "Request CPU and memory decrease - expect InProgress",
-			pod:                 testPod2,
+			originalRequests:    v1.ResourceList{v1.ResourceCPU: cpu1000m, v1.ResourceMemory: mem1000M},
 			newRequests:         v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
 			expectedAllocations: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
 			expectedResize:      v1.PodResizeStatusInProgress,
 			expectBackoffReset:  true,
 		},
 		{
 			name:                "Request CPU increase, memory decrease - expect InProgress",
-			pod:                 testPod2,
+			originalRequests:    v1.ResourceList{v1.ResourceCPU: cpu1000m, v1.ResourceMemory: mem1000M},
 			newRequests:         v1.ResourceList{v1.ResourceCPU: cpu1500m, v1.ResourceMemory: mem500M},
 			expectedAllocations: v1.ResourceList{v1.ResourceCPU: cpu1500m, v1.ResourceMemory: mem500M},
 			expectedResize:      v1.PodResizeStatusInProgress,
 			expectBackoffReset:  true,
 		},
 		{
 			name:                "Request CPU decrease, memory increase - expect InProgress",
-			pod:                 testPod2,
+			originalRequests:    v1.ResourceList{v1.ResourceCPU: cpu1000m, v1.ResourceMemory: mem1000M},
 			newRequests:         v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem1500M},
 			expectedAllocations: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem1500M},
 			expectedResize:      v1.PodResizeStatusInProgress,
 			expectBackoffReset:  true,
 		},
 		{
 			name:                "Request CPU and memory increase beyond current capacity - expect Deferred",
-			pod:                 testPod2,
+			originalRequests:    v1.ResourceList{v1.ResourceCPU: cpu1000m, v1.ResourceMemory: mem1000M},
 			newRequests:         v1.ResourceList{v1.ResourceCPU: cpu2500m, v1.ResourceMemory: mem2500M},
 			expectedAllocations: v1.ResourceList{v1.ResourceCPU: cpu1000m, v1.ResourceMemory: mem1000M},
 			expectedResize:      v1.PodResizeStatusDeferred,
 		},
 		{
 			name:                "Request CPU decrease and memory increase beyond current capacity - expect Deferred",
-			pod:                 testPod2,
+			originalRequests:    v1.ResourceList{v1.ResourceCPU: cpu1000m, v1.ResourceMemory: mem1000M},
 			newRequests:         v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem2500M},
 			expectedAllocations: v1.ResourceList{v1.ResourceCPU: cpu1000m, v1.ResourceMemory: mem1000M},
 			expectedResize:      v1.PodResizeStatusDeferred,
 		},
 		{
 			name:                "Request memory increase beyond node capacity - expect Infeasible",
-			pod:                 testPod2,
+			originalRequests:    v1.ResourceList{v1.ResourceCPU: cpu1000m, v1.ResourceMemory: mem1000M},
 			newRequests:         v1.ResourceList{v1.ResourceCPU: cpu1000m, v1.ResourceMemory: mem4500M},
 			expectedAllocations: v1.ResourceList{v1.ResourceCPU: cpu1000m, v1.ResourceMemory: mem1000M},
 			expectedResize:      v1.PodResizeStatusInfeasible,
 		},
 		{
 			name:                "Request CPU increase beyond node capacity - expect Infeasible",
-			pod:                 testPod2,
+			originalRequests:    v1.ResourceList{v1.ResourceCPU: cpu1000m, v1.ResourceMemory: mem1000M},
 			newRequests:         v1.ResourceList{v1.ResourceCPU: cpu5000m, v1.ResourceMemory: mem1000M},
 			expectedAllocations: v1.ResourceList{v1.ResourceCPU: cpu1000m, v1.ResourceMemory: mem1000M},
 			expectedResize:      v1.PodResizeStatusInfeasible,
 		},
 		{
 			name:                 "CPU increase in progress - expect InProgress",
-			pod:                  testPod2,
+			originalRequests:     v1.ResourceList{v1.ResourceCPU: cpu1000m, v1.ResourceMemory: mem1000M},
 			newRequests:          v1.ResourceList{v1.ResourceCPU: cpu1500m, v1.ResourceMemory: mem1000M},
 			newRequestsAllocated: true,
 			expectedAllocations:  v1.ResourceList{v1.ResourceCPU: cpu1500m, v1.ResourceMemory: mem1000M},
 			expectedResize:       v1.PodResizeStatusInProgress,
 		},
 		{
 			name:                "No resize",
-			pod:                 testPod2,
+			originalRequests:    v1.ResourceList{v1.ResourceCPU: cpu1000m, v1.ResourceMemory: mem1000M},
 			newRequests:         v1.ResourceList{v1.ResourceCPU: cpu1000m, v1.ResourceMemory: mem1000M},
 			expectedAllocations: v1.ResourceList{v1.ResourceCPU: cpu1000m, v1.ResourceMemory: mem1000M},
 			expectedResize:      "",
 		},
 		{
 			name:                "windows node, expect Infeasible",
-			pod:                 testPod2,
+			originalRequests:    v1.ResourceList{v1.ResourceCPU: cpu1000m, v1.ResourceMemory: mem1000M},
 			newRequests:         v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
 			expectedAllocations: v1.ResourceList{v1.ResourceCPU: cpu1000m, v1.ResourceMemory: mem1000M},
 			expectedResize:      v1.PodResizeStatusInfeasible,
 			goos:                "windows",
 		},
+		{
+			name:                "Increase CPU from min shares",
+			originalRequests:    v1.ResourceList{v1.ResourceCPU: cpu2m},
+			newRequests:         v1.ResourceList{v1.ResourceCPU: cpu1000m},
+			expectedAllocations: v1.ResourceList{v1.ResourceCPU: cpu1000m},
+			expectedResize:      v1.PodResizeStatusInProgress,
+			expectBackoffReset:  true,
+		},
+		{
+			name:                "Decrease CPU to min shares",
+			originalRequests:    v1.ResourceList{v1.ResourceCPU: cpu1000m},
+			newRequests:         v1.ResourceList{v1.ResourceCPU: cpu2m},
+			expectedAllocations: v1.ResourceList{v1.ResourceCPU: cpu2m},
+			expectedResize:      v1.PodResizeStatusInProgress,
+			expectBackoffReset:  true,
+		},
+		{
+			name:                "Equivalent min CPU shares",
+			originalRequests:    v1.ResourceList{v1.ResourceCPU: cpu1m},
+			newRequests:         v1.ResourceList{v1.ResourceCPU: cpu2m},
+			expectedAllocations: v1.ResourceList{v1.ResourceCPU: cpu2m},
+			expectedResize:      "",
+			// Even though the resize isn't being actuated, we still clear the container backoff
+			// since the allocation is changing.
+			expectBackoffReset: true,
+		},
+		{
+			name:                 "Equivalent min CPU shares - already allocated",
+			originalRequests:     v1.ResourceList{v1.ResourceCPU: cpu2m},
+			newRequests:          v1.ResourceList{v1.ResourceCPU: cpu1m},
+			newRequestsAllocated: true,
+			expectedAllocations:  v1.ResourceList{v1.ResourceCPU: cpu1m},
+			expectedResize:       "",
+		},
 	}
 
 	for _, tt := range tests {
@@ -2765,22 +2801,26 @@ func TestHandlePodResourcesResize(t *testing.T) {
 			}
 			kubelet.statusManager = status.NewFakeManager()
 
-			newPod := tt.pod.DeepCopy()
+			originalPod := testPod1.DeepCopy()
+			originalPod.Spec.Containers[0].Resources.Requests = tt.originalRequests
+			kubelet.podManager.UpdatePod(originalPod)
+
+			newPod := originalPod.DeepCopy()
 			newPod.Spec.Containers[0].Resources.Requests = tt.newRequests
 
 			if !tt.newRequestsAllocated {
-				require.NoError(t, kubelet.statusManager.SetPodAllocation(tt.pod))
+				require.NoError(t, kubelet.statusManager.SetPodAllocation(originalPod))
 			} else {
 				require.NoError(t, kubelet.statusManager.SetPodAllocation(newPod))
 			}
 
 			podStatus := &kubecontainer.PodStatus{
-				ID:                tt.pod.UID,
-				Name:              tt.pod.Name,
-				Namespace:         tt.pod.Namespace,
-				ContainerStatuses: make([]*kubecontainer.Status, len(tt.pod.Spec.Containers)),
+				ID:                originalPod.UID,
+				Name:              originalPod.Name,
+				Namespace:         originalPod.Namespace,
+				ContainerStatuses: make([]*kubecontainer.Status, len(originalPod.Spec.Containers)),
 			}
-			for i, c := range tt.pod.Spec.Containers {
+			for i, c := range originalPod.Spec.Containers {
 				podStatus.ContainerStatuses[i] = &kubecontainer.Status{
 					Name:  c.Name,
 					State: kubecontainer.ContainerStateRunning,
@@ -2794,7 +2834,7 @@ func TestHandlePodResourcesResize(t *testing.T) {
 
 			now := kubelet.clock.Now()
 			// Put the container in backoff so we can confirm backoff is reset.
-			backoffKey := kuberuntime.GetStableKey(tt.pod, &tt.pod.Spec.Containers[0])
+			backoffKey := kuberuntime.GetStableKey(originalPod, &originalPod.Spec.Containers[0])
 			kubelet.backOff.Next(backoffKey, now)
 
 			updatedPod, err := kubelet.handlePodResourcesResize(newPod, podStatus)
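Context for the new min-shares cases: on Linux, the kubelet converts a container's CPU request to cgroup CPU shares and clamps the result to a floor of 2 shares, so requests of 1m and 2m actuate identically even though the recorded allocation differs. Below is a minimal sketch of that conversion, assuming the standard cgroup constants (2 minimum shares, 1024 shares per CPU, 1000 millicores per CPU); it mirrors the kubelet's MilliCPUToShares helper but is an illustration, not the actual implementation:

package main

import "fmt"

const (
	minShares     = 2    // cgroup minimum cpu.shares
	sharesPerCPU  = 1024 // cpu.shares granted per full CPU
	milliCPUToCPU = 1000 // millicores per CPU
)

// milliCPUToShares sketches how a CPU request in millicores maps to
// cgroup CPU shares, with the result clamped to the 2-share floor.
func milliCPUToShares(milliCPU int64) uint64 {
	if milliCPU == 0 {
		// Zero means unspecified; fall back to the minimum.
		return minShares
	}
	shares := (milliCPU * sharesPerCPU) / milliCPUToCPU
	if shares < minShares {
		return minShares
	}
	return uint64(shares)
}

func main() {
	// 1m and 2m both land on the 2-share floor, so resizing between them
	// changes the allocation without changing what is actuated.
	fmt.Println(milliCPUToShares(1))    // 2
	fmt.Println(milliCPUToShares(2))    // 2
	fmt.Println(milliCPUToShares(1000)) // 1024
}

This is why the "Equivalent min CPU shares" case expects an empty resize status yet still expects the backoff reset: as the in-diff comment notes, the allocation changes (1m to 2m) even though the actuated shares stay at the floor.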