
Commit b4f6788

feat: sync containers resources inplace resize on host cluster (#3494)
Increase k8s version in devspace.yaml for k8s distro to v1.35.0
1 parent d4e40ea commit b4f6788

File tree: 3 files changed (+210, -1 lines)

chart/templates/role.yaml

Lines changed: 4 additions & 0 deletions
@@ -40,6 +40,10 @@ rules:
     resources: ["pods/status", "pods/ephemeralcontainers"]
     verbs: ["patch", "update"]
   {{- end }}
+  {{- if ge (.Capabilities.KubeVersion.Minor|int) 35 }}
+  - apiGroups: [""]
+    resources: ["pods/resize"]
+  {{- end }}
   - apiGroups: ["apps"]
     resources: ["statefulsets", "replicasets", "deployments"]
     verbs: ["get", "list", "watch"]

pkg/controllers/resources/pods/syncer.go

Lines changed: 90 additions & 1 deletion
@@ -2,15 +2,16 @@ package pods
 
 import (
     "context"
+    "encoding/json"
     "fmt"
     "reflect"
     "slices"
     "time"
 
     nodev1 "k8s.io/api/node/v1"
     schedulingv1 "k8s.io/api/scheduling/v1"
-
     utilerrors "k8s.io/apimachinery/pkg/util/errors"
+    utilversion "k8s.io/apimachinery/pkg/util/version"
     "k8s.io/apimachinery/pkg/util/wait"
     "k8s.io/klog/v2"

@@ -104,6 +105,17 @@ func New(ctx *synccontext.RegisterContext) (syncertypes.Object, error) {
         return nil, fmt.Errorf("failed to create scheduling config: %w", err)
     }
 
+    hostClusterVersionInfo, err := ctx.Config.HostClient.Discovery().ServerVersion()
+    if err != nil {
+        return nil, fmt.Errorf("failed to get host cluster version: %w", err)
+    }
+
+    hostClusterVersion, err := utilversion.ParseSemantic(hostClusterVersionInfo.String())
+    if err != nil {
+        // This should never happen
+        return nil, fmt.Errorf("failed to parse host cluster version: %w", err)
+    }
+
     return &podSyncer{
         GenericTranslator: genericTranslator,
         Importer: pro.NewImporter(podsMapper),
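
For orientation, the version gate this hunk introduces can be sketched in isolation as below. This is an illustrative helper under assumed names (resizeSupported, the hostcheck package), not code from the commit; discovery's ServerVersion, utilversion.ParseSemantic, and Version.LessThan are the actual APIs the syncer relies on.

package hostcheck

import (
    "fmt"

    utilversion "k8s.io/apimachinery/pkg/util/version"
    "k8s.io/client-go/discovery"
)

// resizeSupported reports whether the host cluster runs at least v1.35.0, the
// condition the pod syncer checks before touching the pods/resize subresource.
func resizeSupported(dc discovery.DiscoveryInterface) (bool, error) {
    info, err := dc.ServerVersion() // GitVersion, e.g. "v1.35.0"
    if err != nil {
        return false, fmt.Errorf("failed to get host cluster version: %w", err)
    }
    v, err := utilversion.ParseSemantic(info.String())
    if err != nil {
        return false, fmt.Errorf("failed to parse host cluster version: %w", err)
    }
    return !v.LessThan(utilversion.MustParseSemantic("1.35.0")), nil
}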
@@ -119,6 +131,8 @@ func New(ctx *synccontext.RegisterContext) (syncertypes.Object, error) {
         nodeSelector: nodeSelector,
         tolerations: tolerations,
 
+        hostClusterVersion: hostClusterVersion,
+
         podSecurityStandard: ctx.Config.Policies.PodSecurityStandard,
     }, nil
 }
@@ -138,6 +152,8 @@ type podSyncer struct {
     nodeSelector *metav1.LabelSelector
     tolerations  []*corev1.Toleration
 
+    hostClusterVersion *utilversion.Version
+
     podSecurityStandard string
 }
 
@@ -396,6 +412,12 @@ func (s *podSyncer) Sync(ctx *synccontext.SyncContext, event *synccontext.SyncEv
         }
     }()
 
+    // resize the host pod container resources in place if the pod spec has changed
+    err = s.resizeHostPodContainerResourcesInPlace(ctx, event)
+    if err != nil {
+        return ctrl.Result{}, err
+    }
+
     // update the virtual pod if the spec has changed
     err = s.podTranslator.Diff(ctx, event)
     if err != nil {
@@ -409,6 +431,23 @@
     return ctrl.Result{}, nil
 }
 
+func (s *podSyncer) resizeHostPodContainerResourcesInPlace(ctx *synccontext.SyncContext, event *synccontext.SyncEvent[*corev1.Pod]) error {
+    if s.hostClusterVersion.LessThan(utilversion.MustParseSemantic("1.35.0")) {
+        return nil
+    }
+
+    resizePatch, err := buildHostPodContainersResourcesResizePatch(event.Virtual, event.Host)
+    if err != nil {
+        return err
+    }
+    if resizePatch != nil {
+        if err := s.applyResizeSubresource(ctx, event.Host, resizePatch); err != nil {
+            return err
+        }
+    }
+    return nil
+}
+
 func (s *podSyncer) SyncToVirtual(ctx *synccontext.SyncContext, event *synccontext.SyncToVirtualEvent[*corev1.Pod]) (_ ctrl.Result, retErr error) {
     if event.VirtualOld != nil || translate.ShouldDeleteHostObject(event.Host) {
         // virtual object is not here anymore, so we delete
@@ -449,6 +488,56 @@ func setSATokenSecretAsOwner(ctx *synccontext.SyncContext, pClient client.Client
     return nil
 }
 
+type resizePatch struct {
+    Spec resizePatchSpec `json:"spec"`
+}
+
+type resizePatchSpec struct {
+    Containers []resizeContainer `json:"containers"`
+}
+
+type resizeContainer struct {
+    Name      string                      `json:"name"`
+    Resources corev1.ResourceRequirements `json:"resources,omitempty"`
+}
+
+func buildHostPodContainersResourcesResizePatch(vPod, pPod *corev1.Pod) ([]byte, error) {
+    hostContainers := map[string]corev1.Container{}
+    for _, c := range pPod.Spec.Containers {
+        hostContainers[c.Name] = c
+    }
+
+    var patchContainers []resizeContainer
+    for _, v := range vPod.Spec.Containers {
+        p, ok := hostContainers[v.Name]
+        if !ok {
+            continue
+        }
+        if equality.Semantic.DeepEqual(p.Resources, v.Resources) {
+            continue
+        }
+        patchContainers = append(patchContainers, resizeContainer{
+            Name:      v.Name,
+            Resources: v.Resources,
+        })
+    }
+
+    if len(patchContainers) == 0 {
+        return nil, nil
+    }
+
+    // TODO: Improve this to potentially integrate pod-level resource requests and limits in-place resize once it is GA
+    return json.Marshal(resizePatch{
+        Spec: resizePatchSpec{
+            Containers: patchContainers,
+        },
+    })
+}
+
+func (s *podSyncer) applyResizeSubresource(ctx *synccontext.SyncContext, hostPod *corev1.Pod, patch []byte) error {
+    return ctx.HostClient.SubResource("resize").Patch(ctx, hostPod, client.RawPatch(types.StrategicMergePatchType, patch))
+}
+
 func (s *podSyncer) ensureNode(ctx *synccontext.SyncContext, pObj *corev1.Pod, vObj *corev1.Pod) (bool, error) {
     if vObj.Spec.NodeName != pObj.Spec.NodeName && vObj.Spec.NodeName != "" {
         // node of virtual and physical pod are different, we delete the virtual pod to try to recover from this state
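
As a reference point, applyResizeSubresource above goes through the controller-runtime client's SubResource("resize") helper. The same operation with a plain client-go clientset would look roughly like the sketch below; the applyResizePatch name and hostpatch package are assumptions, while the trailing subresource argument to Patch is client-go's actual signature.

package hostpatch

import (
    "context"

    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/client-go/kubernetes"
)

// applyResizePatch sends a strategic merge patch to the pods/resize subresource of a
// host pod, mirroring what the syncer does through ctx.HostClient.SubResource("resize").
func applyResizePatch(ctx context.Context, c kubernetes.Interface, pod *corev1.Pod, patch []byte) (*corev1.Pod, error) {
    return c.CoreV1().Pods(pod.Namespace).Patch(
        ctx,
        pod.Name,
        types.StrategicMergePatchType,
        patch,
        metav1.PatchOptions{},
        "resize", // subresource targeted by the new RBAC entry in chart/templates/role.yaml
    )
}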

pkg/controllers/resources/pods/syncer_test.go

Lines changed: 116 additions & 0 deletions
@@ -1,13 +1,15 @@
 package pods
 
 import (
+    "encoding/json"
     "fmt"
     "maps"
     "testing"
 
     "gotest.tools/assert"
     corev1 "k8s.io/api/core/v1"
     schedulingv1 "k8s.io/api/scheduling/v1"
+    "k8s.io/apimachinery/pkg/api/resource"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/runtime"
     "k8s.io/apimachinery/pkg/runtime/schema"
@@ -691,3 +693,117 @@ func TestSync(t *testing.T) {
         },
     })
 }
+
+func TestBuildResizePatch(t *testing.T) {
+    makePod := func(memory string) *corev1.Pod {
+        return &corev1.Pod{
+            ObjectMeta: metav1.ObjectMeta{
+                Name:      "test-pod",
+                Namespace: "default",
+            },
+            Spec: corev1.PodSpec{
+                Containers: []corev1.Container{
+                    {
+                        Name:  "c1",
+                        Image: "nginx",
+                        Resources: corev1.ResourceRequirements{
+                            Requests: corev1.ResourceList{
+                                corev1.ResourceCPU:    resource.MustParse("100m"),
+                                corev1.ResourceMemory: resource.MustParse(memory),
+                            },
+                            Limits: corev1.ResourceList{
+                                corev1.ResourceMemory: resource.MustParse(memory),
+                            },
+                        },
+                    },
+                },
+            },
+        }
+    }
+
+    t.Run("creates patch when resources differ", func(t *testing.T) {
+        vPod := makePod("30Mi")
+        pPod := makePod("20Mi")
+
+        patchBytes, err := buildHostPodContainersResourcesResizePatch(vPod, pPod)
+        assert.NilError(t, err)
+        assert.Assert(t, patchBytes != nil)
+
+        var patch resizePatch
+        err = json.Unmarshal(patchBytes, &patch)
+        assert.NilError(t, err)
+        assert.Equal(t, len(patch.Spec.Containers), 1)
+        assert.Equal(t, patch.Spec.Containers[0].Name, "c1")
+        got := patch.Spec.Containers[0].Resources.Requests[corev1.ResourceMemory]
+        assert.Assert(t, got.Cmp(resource.MustParse("30Mi")) == 0)
+    })
+
+    t.Run("returns nil when resources are equal", func(t *testing.T) {
+        vPod := makePod("20Mi")
+        pPod := makePod("20Mi")
+
+        patchBytes, err := buildHostPodContainersResourcesResizePatch(vPod, pPod)
+        assert.NilError(t, err)
+        assert.Assert(t, patchBytes == nil)
+    })
+
+    t.Run("includes only containers with resource diffs", func(t *testing.T) {
+        vPod := makePod("30Mi")
+        vPod.Spec.Containers = append(vPod.Spec.Containers, corev1.Container{
+            Name:  "c2",
+            Image: "busybox",
+            Resources: corev1.ResourceRequirements{
+                Requests: corev1.ResourceList{
+                    corev1.ResourceCPU:    resource.MustParse("50m"),
+                    corev1.ResourceMemory: resource.MustParse("10Mi"),
+                },
+            },
+        })
+        pPod := makePod("20Mi")
+        pPod.Spec.Containers = append(pPod.Spec.Containers, corev1.Container{
+            Name:  "c2",
+            Image: "busybox",
+            Resources: corev1.ResourceRequirements{
+                Requests: corev1.ResourceList{
+                    corev1.ResourceCPU:    resource.MustParse("50m"),
+                    corev1.ResourceMemory: resource.MustParse("10Mi"),
+                },
+            },
+        })
+
+        patchBytes, err := buildHostPodContainersResourcesResizePatch(vPod, pPod)
+        assert.NilError(t, err)
+        assert.Assert(t, patchBytes != nil)
+
+        var patch resizePatch
+        err = json.Unmarshal(patchBytes, &patch)
+        assert.NilError(t, err)
+        assert.Equal(t, len(patch.Spec.Containers), 1)
+        assert.Equal(t, patch.Spec.Containers[0].Name, "c1")
+    })
+
+    t.Run("skips containers missing on host", func(t *testing.T) {
+        vPod := makePod("30Mi")
+        vPod.Spec.Containers = append(vPod.Spec.Containers, corev1.Container{
+            Name:  "c2",
+            Image: "busybox",
+            Resources: corev1.ResourceRequirements{
+                Requests: corev1.ResourceList{
+                    corev1.ResourceCPU:    resource.MustParse("50m"),
+                    corev1.ResourceMemory: resource.MustParse("10Mi"),
+                },
+            },
+        })
+        pPod := makePod("20Mi")
+
+        patchBytes, err := buildHostPodContainersResourcesResizePatch(vPod, pPod)
+        assert.NilError(t, err)
+        assert.Assert(t, patchBytes != nil)
+
+        var patch resizePatch
+        err = json.Unmarshal(patchBytes, &patch)
+        assert.NilError(t, err)
+        assert.Equal(t, len(patch.Spec.Containers), 1)
+        assert.Equal(t, patch.Spec.Containers[0].Name, "c1")
+    })
+}
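
To make the patch format concrete, here is a hypothetical snippet that would live alongside the new test in package pods (it calls the unexported builder, so it cannot sit elsewhere). The debugResizePatch name is made up; the printed JSON shape follows the resizePatch/resizePatchSpec/resizeContainer structs added in syncer.go.

package pods

import (
    "fmt"

    corev1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/resource"
)

// debugResizePatch prints the patch built when the virtual pod requests more memory
// than the host pod currently has, e.g.
// {"spec":{"containers":[{"name":"c1","resources":{"requests":{"memory":"30Mi"}}}]}}
func debugResizePatch() error {
    container := func(memory string) corev1.Container {
        return corev1.Container{
            Name: "c1",
            Resources: corev1.ResourceRequirements{
                Requests: corev1.ResourceList{corev1.ResourceMemory: resource.MustParse(memory)},
            },
        }
    }
    vPod := &corev1.Pod{Spec: corev1.PodSpec{Containers: []corev1.Container{container("30Mi")}}}
    pPod := &corev1.Pod{Spec: corev1.PodSpec{Containers: []corev1.Container{container("20Mi")}}}

    patch, err := buildHostPodContainersResourcesResizePatch(vPod, pPod)
    if err != nil {
        return err
    }
    fmt.Println(string(patch))
    return nil
}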
