Commit d5d008a

Invoke UpdateContainerResources or trigger container restarts (for RestartContainer policy) when memory requests are resized
1 parent 3a14b61 commit d5d008a
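
In short: when only a container's memory request changes, the kubelet now either updates the container in place (the UpdateContainerResources path) or kills and restarts it, depending on the container's memory ResizePolicy. A minimal, self-contained sketch of that policy lookup, assuming a module that pulls in k8s.io/api; the helper name needsRestart is illustrative and not part of this commit:

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// needsRestart is an illustrative stand-in for the lookup inside
// determineContainerResize: a changed resource is resized in place unless the
// container's ResizePolicy asks for RestartContainer on that resource.
func needsRestart(c *v1.Container, rName v1.ResourceName, desired, current int64) (resize, restart bool) {
	if desired == current {
		return false, false // nothing changed for this resource
	}
	for _, policy := range c.ResizePolicy {
		if policy.ResourceName == rName {
			return true, policy.RestartPolicy == v1.RestartContainer
		}
	}
	// No policy entry for this resource: the implicit default is NotRequired.
	return true, false
}

func main() {
	c := &v1.Container{
		ResizePolicy: []v1.ContainerResizePolicy{
			{ResourceName: v1.ResourceMemory, RestartPolicy: v1.RestartContainer},
		},
	}
	// Memory request grows from 100Mi to 200Mi: resize=true, restart=true.
	fmt.Println(needsRestart(c, v1.ResourceMemory, 200<<20, 100<<20))
	// CPU request is unchanged: resize=false, restart=false.
	fmt.Println(needsRestart(c, v1.ResourceCPU, 100, 100))
}
```

With no policy entry for a resource, the implicit default is NotRequired, so the change is applied in place.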

5 files changed: +272 -107 lines

pkg/kubelet/kuberuntime/kuberuntime_manager.go

Lines changed: 9 additions & 6 deletions
@@ -634,8 +634,8 @@ func (m *kubeGenericRuntimeManager) computePodResizeAction(pod *v1.Pod, containe
 		return true
 	}
 
-	determineContainerResize := func(rName v1.ResourceName, specValue, statusValue int64) (resize, restart bool) {
-		if specValue == statusValue {
+	determineContainerResize := func(rName v1.ResourceName, desiredValue, currentValue int64) (resize, restart bool) {
+		if desiredValue == currentValue {
 			return false, false
 		}
 		for _, policy := range container.ResizePolicy {
@@ -646,7 +646,7 @@ func (m *kubeGenericRuntimeManager) computePodResizeAction(pod *v1.Pod, containe
 		// If a resource policy isn't set, the implicit default is NotRequired.
 		return true, false
 	}
-	markContainerForUpdate := func(rName v1.ResourceName, specValue, statusValue int64) {
+	markContainerForUpdate := func(rName v1.ResourceName, desiredValue, currentValue int64) {
 		cUpdateInfo := containerToUpdateInfo{
 			container:       &container,
 			kubeContainerID: kubeContainerStatus.ID,
@@ -655,18 +655,19 @@ func (m *kubeGenericRuntimeManager) computePodResizeAction(pod *v1.Pod, containe
 		}
 		// Order the container updates such that resource decreases are applied before increases
 		switch {
-		case specValue > statusValue: // append
+		case desiredValue > currentValue: // append
 			changes.ContainersToUpdate[rName] = append(changes.ContainersToUpdate[rName], cUpdateInfo)
-		case specValue < statusValue: // prepend
+		case desiredValue < currentValue: // prepend
 			changes.ContainersToUpdate[rName] = append(changes.ContainersToUpdate[rName], containerToUpdateInfo{})
 			copy(changes.ContainersToUpdate[rName][1:], changes.ContainersToUpdate[rName])
 			changes.ContainersToUpdate[rName][0] = cUpdateInfo
 		}
 	}
 	resizeMemLim, restartMemLim := determineContainerResize(v1.ResourceMemory, desiredResources.memoryLimit, currentResources.memoryLimit)
+	resizeMemReq, restartMemReq := determineContainerResize(v1.ResourceMemory, desiredResources.memoryRequest, currentResources.memoryRequest)
 	resizeCPULim, restartCPULim := determineContainerResize(v1.ResourceCPU, desiredResources.cpuLimit, currentResources.cpuLimit)
 	resizeCPUReq, restartCPUReq := determineContainerResize(v1.ResourceCPU, desiredResources.cpuRequest, currentResources.cpuRequest)
-	if restartCPULim || restartCPUReq || restartMemLim {
+	if restartCPULim || restartCPUReq || restartMemLim || restartMemReq {
		// resize policy requires this container to restart
 		changes.ContainersToKill[kubeContainerStatus.ID] = containerToKillInfo{
 			name: kubeContainerStatus.Name,
@@ -683,6 +684,8 @@ func (m *kubeGenericRuntimeManager) computePodResizeAction(pod *v1.Pod, containe
 	} else {
 		if resizeMemLim {
 			markContainerForUpdate(v1.ResourceMemory, desiredResources.memoryLimit, currentResources.memoryLimit)
+		} else if resizeMemReq {
+			markContainerForUpdate(v1.ResourceMemory, desiredResources.memoryRequest, currentResources.memoryRequest)
 		}
 		if resizeCPULim {
 			markContainerForUpdate(v1.ResourceCPU, desiredResources.cpuLimit, currentResources.cpuLimit)

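The new memory-request updates flow through the unchanged ordering logic above: markContainerForUpdate prepends decreases and appends increases, so walking ContainersToUpdate front to back applies all decreases before any increase (presumably so the pod never transiently exceeds its aggregate allocation mid-resize). A standalone sketch of that queueing behaviour; the update type and enqueue helper are illustrative stand-ins for containerToUpdateInfo and markContainerForUpdate:

```go
package main

import "fmt"

// update is an illustrative stand-in for containerToUpdateInfo.
type update struct {
	name             string
	desired, current int64
}

// enqueue mirrors the append/prepend ordering used by markContainerForUpdate:
// decreases are prepended and increases appended, so that walking the slice
// front to back applies every decrease before any increase.
func enqueue(queue []update, u update) []update {
	switch {
	case u.desired > u.current: // increase: append
		return append(queue, u)
	case u.desired < u.current: // decrease: prepend
		queue = append(queue, update{})
		copy(queue[1:], queue)
		queue[0] = u
	}
	return queue
}

func main() {
	var q []update
	q = enqueue(q, update{name: "c1", desired: 200, current: 100}) // increase
	q = enqueue(q, update{name: "c2", desired: 100, current: 200}) // decrease
	fmt.Println(q) // c2 (the decrease) now sits ahead of c1 (the increase)
}
```
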
pkg/kubelet/kuberuntime/kuberuntime_manager_test.go

Lines changed: 90 additions & 0 deletions
@@ -2890,6 +2890,96 @@ func TestComputePodActionsForPodResize(t *testing.T) {
 				return &pa
 			},
 		},
+		"Update container memory (requests only) with RestartContainer policy for memory": {
+			setupFn: func(pod *v1.Pod) {
+				c := &pod.Spec.Containers[2]
+				c.ResizePolicy = []v1.ContainerResizePolicy{cpuPolicyRestartNotRequired, memPolicyRestartRequired}
+				c.Resources = v1.ResourceRequirements{
+					Limits:   v1.ResourceList{v1.ResourceCPU: cpu200m, v1.ResourceMemory: mem200M},
+					Requests: v1.ResourceList{v1.ResourceCPU: cpu100m, v1.ResourceMemory: mem100M},
+				}
+				setupActuatedResources(pod, c, v1.ResourceRequirements{
+					Limits: v1.ResourceList{
+						v1.ResourceCPU:    cpu200m.DeepCopy(),
+						v1.ResourceMemory: mem200M.DeepCopy(),
+					},
+					Requests: v1.ResourceList{
+						v1.ResourceCPU:    cpu100m.DeepCopy(),
+						v1.ResourceMemory: mem200M.DeepCopy(),
+					},
+				})
+			},
+			getExpectedPodActionsFn: func(pod *v1.Pod, podStatus *kubecontainer.PodStatus) *podActions {
+				kcs := podStatus.FindContainerStatusByName(pod.Spec.Containers[2].Name)
+				killMap := make(map[kubecontainer.ContainerID]containerToKillInfo)
+				killMap[kcs.ID] = containerToKillInfo{
+					container: &pod.Spec.Containers[2],
+					name:      pod.Spec.Containers[2].Name,
+				}
+				pa := podActions{
+					SandboxID:          podStatus.SandboxStatuses[0].Id,
+					ContainersToStart:  []int{2},
+					ContainersToKill:   killMap,
+					ContainersToUpdate: map[v1.ResourceName][]containerToUpdateInfo{},
+					UpdatePodResources: true,
+				}
+				return &pa
+			},
+		},
+		"Update container memory (requests only) with RestartNotRequired policy for memory": {
+			setupFn: func(pod *v1.Pod) {
+				c := &pod.Spec.Containers[2]
+				c.ResizePolicy = []v1.ContainerResizePolicy{cpuPolicyRestartNotRequired, memPolicyRestartNotRequired}
+				c.Resources = v1.ResourceRequirements{
+					Limits:   v1.ResourceList{v1.ResourceCPU: cpu200m, v1.ResourceMemory: mem200M},
+					Requests: v1.ResourceList{v1.ResourceCPU: cpu100m, v1.ResourceMemory: mem100M},
+				}
+				setupActuatedResources(pod, c, v1.ResourceRequirements{
+					Limits: v1.ResourceList{
+						v1.ResourceCPU:    cpu200m.DeepCopy(),
+						v1.ResourceMemory: mem200M.DeepCopy(),
+					},
+					Requests: v1.ResourceList{
+						v1.ResourceCPU:    cpu100m.DeepCopy(),
+						v1.ResourceMemory: mem200M.DeepCopy(),
+					},
+				})
+			},
+			getExpectedPodActionsFn: func(pod *v1.Pod, podStatus *kubecontainer.PodStatus) *podActions {
+				kcs := podStatus.FindContainerStatusByName(pod.Spec.Containers[2].Name)
+				killMap := make(map[kubecontainer.ContainerID]containerToKillInfo)
+				killMap[kcs.ID] = containerToKillInfo{
+					container: &pod.Spec.Containers[2],
+					name:      pod.Spec.Containers[2].Name,
+				}
+				pa := podActions{
+					SandboxID:         podStatus.SandboxStatuses[0].Id,
+					ContainersToStart: []int{},
+					ContainersToKill:  getKillMap(pod, podStatus, []int{}),
+					ContainersToUpdate: map[v1.ResourceName][]containerToUpdateInfo{
+						v1.ResourceMemory: {
+							{
+								container:       &pod.Spec.Containers[2],
+								kubeContainerID: kcs.ID,
+								desiredContainerResources: containerResources{
+									memoryLimit:   mem200M.Value(),
+									memoryRequest: mem100M.Value(),
+									cpuLimit:      cpu200m.MilliValue(),
+									cpuRequest:    cpu100m.MilliValue(),
+								},
+								currentContainerResources: &containerResources{
+									memoryLimit:   mem200M.Value(),
+									memoryRequest: mem200M.Value(),
+									cpuLimit:      cpu200m.MilliValue(),
+									cpuRequest:    cpu100m.MilliValue(),
+								},
+							},
+						},
+					},
+				}
+				return &pa
+			},
+		},
 	} {
 		t.Run(desc, func(t *testing.T) {
 			pod, status := makeBasePodAndStatus()

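The two new table cases set up the same desired-versus-actuated mismatch (memory request actuated at mem200M, desired mem100M, limits unchanged) and differ only in the memory ResizePolicy: with RestartContainer the container lands in ContainersToKill/ContainersToStart with UpdatePodResources set, while with RestartNotRequired it lands in ContainersToUpdate[v1.ResourceMemory] carrying the new memory request. They can be run in isolation with `go test ./pkg/kubelet/kuberuntime/ -run TestComputePodActionsForPodResize` from a kubernetes checkout at this commit.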