Commit 8fa8277

Added some unit tests
1 parent 2ba6132 commit 8fa8277

4 files changed: +468 -92 lines changed


pkg/apis/core/validation/validation.go

Lines changed: 6 additions & 6 deletions
@@ -5413,16 +5413,16 @@ func ValidateInitContainerStateTransition(newStatuses, oldStatuses []core.Contai
 		}
 
 		// Skip any restartable init container that is allowed to restart
-		isRestartableInitContainer := false
+		isRestartableInitCtr := false
 		for _, c := range podSpec.InitContainers {
 			if oldStatus.Name == c.Name {
-				if c.RestartPolicy != nil && *c.RestartPolicy == core.ContainerRestartPolicyAlways {
-					isRestartableInitContainer = true
+				if isRestartableInitContainer(&c) {
+					isRestartableInitCtr = true
 				}
 				break
 			}
 		}
-		if isRestartableInitContainer {
+		if isRestartableInitCtr {
 			continue
 		}
 
@@ -5619,7 +5619,7 @@ func ValidatePodResize(newPod, oldPod *core.Pod, opts PodValidationOptions) fiel
 	// Do not allow removing resource requests/limits on resize.
 	if utilfeature.DefaultFeatureGate.Enabled(features.SidecarContainers) {
 		for ix, ctr := range oldPod.Spec.InitContainers {
-			if ctr.RestartPolicy != nil && *ctr.RestartPolicy != core.ContainerRestartPolicyAlways {
+			if !isRestartableInitContainer(&ctr) {
 				continue
 			}
 			if resourcesRemoved(newPod.Spec.InitContainers[ix].Resources.Requests, ctr.Resources.Requests) {
@@ -5652,7 +5652,7 @@ func ValidatePodResize(newPod, oldPod *core.Pod, opts PodValidationOptions) fiel
 	var newInitContainers []core.Container
 	if utilfeature.DefaultFeatureGate.Enabled(features.SidecarContainers) {
 		for ix, container := range originalCPUMemPodSpec.InitContainers {
-			if container.RestartPolicy != nil && *container.RestartPolicy == core.ContainerRestartPolicyAlways { // restartable init container
+			if isRestartableInitContainer(&container) { // restartable init container
 				dropCPUMemoryResourcesFromContainer(&container, &oldPod.Spec.InitContainers[ix])
 			}
 			newInitContainers = append(newInitContainers, container)
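Note: the isRestartableInitContainer helper these hunks now call is not itself shown in this diff. Judging from the inline condition it replaces, it presumably reduces to a nil-safe check that the init container's restart policy is Always. A minimal sketch, assuming that is all it does (the helper name comes from the call sites above; its exact body, signature, and placement are assumptions, not part of this commit's hunks):

// isRestartableInitContainer reports whether the given init container is a
// restartable ("sidecar") init container, i.e. its RestartPolicy pointer is
// set and equal to ContainerRestartPolicyAlways. (Assumed implementation.)
func isRestartableInitContainer(initContainer *core.Container) bool {
	if initContainer.RestartPolicy == nil {
		return false
	}
	return *initContainer.RestartPolicy == core.ContainerRestartPolicyAlways
}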

pkg/kubelet/kubelet_test.go

Lines changed: 127 additions & 60 deletions
@@ -2584,9 +2584,11 @@ func TestPodResourceAllocationReset(t *testing.T) {
 
 func TestHandlePodResourcesResize(t *testing.T) {
 	featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScaling, true)
+	featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.SidecarContainers, true)
 	testKubelet := newTestKubelet(t, false)
 	defer testKubelet.Cleanup()
 	kubelet := testKubelet.kubelet
+	containerRestartPolicyAlways := v1.ContainerRestartPolicyAlways
 
 	cpu1m := resource.MustParse("1m")
 	cpu2m := resource.MustParse("2m")
@@ -2651,6 +2653,28 @@
 	testPod2.UID = "2222"
 	testPod2.Name = "pod2"
 	testPod2.Namespace = "ns2"
+	testPod2.Spec = v1.PodSpec{
+		InitContainers: []v1.Container{
+			{
+				Name:  "c1-init",
+				Image: "i1",
+				Resources: v1.ResourceRequirements{
+					Requests: v1.ResourceList{v1.ResourceCPU: cpu1000m, v1.ResourceMemory: mem1000M},
+				},
+				RestartPolicy: &containerRestartPolicyAlways,
+			},
+		},
+	}
+	testPod2.Status = v1.PodStatus{
+		Phase: v1.PodRunning,
+		InitContainerStatuses: []v1.ContainerStatus{
+			{
+				Name:               "c1-init",
+				AllocatedResources: v1.ResourceList{v1.ResourceCPU: cpu1000m, v1.ResourceMemory: mem1000M},
+				Resources:          &v1.ResourceRequirements{},
+			},
+		},
+	}
 	testPod3 := testPod1.DeepCopy()
 	testPod3.UID = "3333"
 	testPod3.Name = "pod3"
@@ -2842,72 +2866,115 @@
 	}
 
 	for _, tt := range tests {
-		t.Run(tt.name, func(t *testing.T) {
-			oldGOOS := goos
-			defer func() { goos = oldGOOS }()
-			if tt.goos != "" {
-				goos = tt.goos
-			}
-			kubelet.statusManager = status.NewFakeManager()
+		for _, isSidecarContainer := range []bool{false, true} {
+			t.Run(tt.name, func(t *testing.T) {
+				oldGOOS := goos
+				defer func() { goos = oldGOOS }()
+				if tt.goos != "" {
+					goos = tt.goos
+				}
+				kubelet.statusManager = status.NewFakeManager()
+
+				var originalPod *v1.Pod
+				if isSidecarContainer {
+					originalPod = testPod2.DeepCopy()
+					originalPod.Spec.InitContainers[0].Resources.Requests = tt.originalRequests
+					originalPod.Spec.InitContainers[0].Resources.Limits = tt.originalLimits
+				} else {
+					originalPod = testPod1.DeepCopy()
+					originalPod.Spec.Containers[0].Resources.Requests = tt.originalRequests
+					originalPod.Spec.Containers[0].Resources.Limits = tt.originalLimits
+				}
 
-			originalPod := testPod1.DeepCopy()
-			originalPod.Spec.Containers[0].Resources.Requests = tt.originalRequests
-			originalPod.Spec.Containers[0].Resources.Limits = tt.originalLimits
-			kubelet.podManager.UpdatePod(originalPod)
+				kubelet.podManager.UpdatePod(originalPod)
 
-			newPod := originalPod.DeepCopy()
-			newPod.Spec.Containers[0].Resources.Requests = tt.newRequests
-			newPod.Spec.Containers[0].Resources.Limits = tt.newLimits
+				newPod := originalPod.DeepCopy()
 
-			if !tt.newResourcesAllocated {
-				require.NoError(t, kubelet.statusManager.SetPodAllocation(originalPod))
-			} else {
-				require.NoError(t, kubelet.statusManager.SetPodAllocation(newPod))
-			}
+				if isSidecarContainer {
+					newPod.Spec.InitContainers[0].Resources.Requests = tt.newRequests
+					newPod.Spec.InitContainers[0].Resources.Limits = tt.newLimits
+				} else {
+					newPod.Spec.Containers[0].Resources.Requests = tt.newRequests
+					newPod.Spec.Containers[0].Resources.Limits = tt.newLimits
+				}
 
-			podStatus := &kubecontainer.PodStatus{
-				ID:                originalPod.UID,
-				Name:              originalPod.Name,
-				Namespace:         originalPod.Namespace,
-				ContainerStatuses: make([]*kubecontainer.Status, len(originalPod.Spec.Containers)),
-			}
-			for i, c := range originalPod.Spec.Containers {
-				podStatus.ContainerStatuses[i] = &kubecontainer.Status{
-					Name:  c.Name,
-					State: kubecontainer.ContainerStateRunning,
-					Resources: &kubecontainer.ContainerResources{
-						CPURequest:  c.Resources.Requests.Cpu(),
-						CPULimit:    c.Resources.Limits.Cpu(),
-						MemoryLimit: c.Resources.Limits.Memory(),
-					},
-				}
-			}
+				if !tt.newResourcesAllocated {
+					require.NoError(t, kubelet.statusManager.SetPodAllocation(originalPod))
+				} else {
+					require.NoError(t, kubelet.statusManager.SetPodAllocation(newPod))
+				}
 
-			now := kubelet.clock.Now()
-			// Put the container in backoff so we can confirm backoff is reset.
-			backoffKey := kuberuntime.GetStableKey(originalPod, &originalPod.Spec.Containers[0])
-			kubelet.backOff.Next(backoffKey, now)
-
-			updatedPod, err := kubelet.handlePodResourcesResize(newPod, podStatus)
-			require.NoError(t, err)
-			assert.Equal(t, tt.expectedAllocatedReqs, updatedPod.Spec.Containers[0].Resources.Requests, "updated pod spec requests")
-			assert.Equal(t, tt.expectedAllocatedLims, updatedPod.Spec.Containers[0].Resources.Limits, "updated pod spec limits")
-
-			alloc, found := kubelet.statusManager.GetContainerResourceAllocation(string(newPod.UID), newPod.Spec.Containers[0].Name)
-			require.True(t, found, "container allocation")
-			assert.Equal(t, tt.expectedAllocatedReqs, alloc.Requests, "stored container request allocation")
-			assert.Equal(t, tt.expectedAllocatedLims, alloc.Limits, "stored container limit allocation")
-
-			resizeStatus := kubelet.statusManager.GetPodResizeStatus(newPod.UID)
-			assert.Equal(t, tt.expectedResize, resizeStatus)
-
-			isInBackoff := kubelet.backOff.IsInBackOffSince(backoffKey, now)
-			if tt.expectBackoffReset {
-				assert.False(t, isInBackoff, "container backoff should be reset")
-			} else {
-				assert.True(t, isInBackoff, "container backoff should not be reset")
-			}
-		})
+				podStatus := &kubecontainer.PodStatus{
+					ID:        originalPod.UID,
+					Name:      originalPod.Name,
+					Namespace: originalPod.Namespace,
+				}
+
+				setContainerStatus := func(podStatus *kubecontainer.PodStatus, c *v1.Container, idx int) {
+					podStatus.ContainerStatuses[idx] = &kubecontainer.Status{
+						Name:  c.Name,
+						State: kubecontainer.ContainerStateRunning,
+						Resources: &kubecontainer.ContainerResources{
+							CPURequest:  c.Resources.Requests.Cpu(),
+							CPULimit:    c.Resources.Limits.Cpu(),
+							MemoryLimit: c.Resources.Limits.Memory(),
+						},
+					}
+				}
+
+				if isSidecarContainer {
+					podStatus.ContainerStatuses = make([]*kubecontainer.Status, len(originalPod.Spec.InitContainers))
+					for i, c := range originalPod.Spec.InitContainers {
+						setContainerStatus(podStatus, &c, i)
+					}
+				} else {
+					podStatus.ContainerStatuses = make([]*kubecontainer.Status, len(originalPod.Spec.Containers))
+					for i, c := range originalPod.Spec.Containers {
+						setContainerStatus(podStatus, &c, i)
+					}
+				}
+
+				now := kubelet.clock.Now()
+				// Put the container in backoff so we can confirm backoff is reset.
+				var backoffKey string
+				if isSidecarContainer {
+					backoffKey = kuberuntime.GetStableKey(originalPod, &originalPod.Spec.InitContainers[0])
+				} else {
+					backoffKey = kuberuntime.GetStableKey(originalPod, &originalPod.Spec.Containers[0])
+				}
+				kubelet.backOff.Next(backoffKey, now)
+
+				updatedPod, err := kubelet.handlePodResourcesResize(newPod, podStatus)
+				require.NoError(t, err)
+
+				var updatedPodCtr v1.Container
+				var newPodCtr v1.Container
+				if isSidecarContainer {
+					updatedPodCtr = updatedPod.Spec.InitContainers[0]
+					newPodCtr = newPod.Spec.InitContainers[0]
+				} else {
+					updatedPodCtr = updatedPod.Spec.Containers[0]
+					newPodCtr = newPod.Spec.Containers[0]
+				}
+				assert.Equal(t, tt.expectedAllocatedReqs, updatedPodCtr.Resources.Requests, "updated pod spec requests")
+				assert.Equal(t, tt.expectedAllocatedLims, updatedPodCtr.Resources.Limits, "updated pod spec limits")
+
+				alloc, found := kubelet.statusManager.GetContainerResourceAllocation(string(newPod.UID), newPodCtr.Name)
+				require.True(t, found, "container allocation")
+				assert.Equal(t, tt.expectedAllocatedReqs, alloc.Requests, "stored container request allocation")
+				assert.Equal(t, tt.expectedAllocatedLims, alloc.Limits, "stored container limit allocation")
+
+				resizeStatus := kubelet.statusManager.GetPodResizeStatus(newPod.UID)
+				assert.Equal(t, tt.expectedResize, resizeStatus)
+
+				isInBackoff := kubelet.backOff.IsInBackOffSince(backoffKey, now)
+				if tt.expectBackoffReset {
+					assert.False(t, isInBackoff, "container backoff should be reset")
+				} else {
+					assert.True(t, isInBackoff, "container backoff should not be reset")
+				}
+			})
+		}
 	}
 }
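With this change, every entry in the resize test table is exercised twice: once against a regular container (testPod1) and once against a restartable init container (testPod2). To iterate on just this test from the root of a Kubernetes checkout, an invocation along these lines should work on a platform where the kubelet unit tests build, though the exact command may vary with your environment:

	go test ./pkg/kubelet/ -run TestHandlePodResourcesResize -v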
