Skip to content

Commit c720d24

Browse files
committed
feat: sync containers resources inplace resize on host cluster
Increase k8s version in devspace.yaml for k8s distro to v1.35.0
1 parent 4372dfb commit c720d24

File tree

4 files changed

+230
-1
lines changed

4 files changed

+230
-1
lines changed

chart/templates/role.yaml

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -40,6 +40,10 @@ rules:
4040
resources: ["pods/status", "pods/ephemeralcontainers"]
4141
verbs: ["patch", "update"]
4242
{{- end }}
43+
{{- if ge (.Capabilities.KubeVersion.Minor|int) 35 }}
# In-place pod resize (GA behavior targeted at 1.35+) goes through the
# pods/resize subresource; without verbs this rule grants nothing, so list
# the same verbs as the pods/status rule above.
- apiGroups: [""]
  resources: ["pods/resize"]
  verbs: ["patch", "update"]
{{- end }}
4347
- apiGroups: ["apps"]
4448
resources: ["statefulsets", "replicasets", "deployments"]
4549
verbs: ["get", "list", "watch"]

chart/tests/role_test.yaml

Lines changed: 18 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -112,6 +112,24 @@ tests:
112112
path: metadata.namespace
113113
value: my-namespace
114114

115+
# Verifies the role template emits the pods/resize rule when the cluster
# reports Kubernetes minor version >= 35. The `contains` assertion performs
# deep equality on the list element, so the expected rule must include its
# verbs, not just apiGroups/resources.
- it: adds pods/resize rule for kube 1.35+
  release:
    name: my-pods-resize-release
    namespace: my-pods-resize-namespace
  capabilities:
    minorVersion: 35
  asserts:
    - hasDocuments:
        count: 1
    - lengthEqual:
        path: rules
        count: 9
    - contains:
        path: rules
        content:
          apiGroups: [""]
          resources: ["pods/resize"]
          verbs: ["patch", "update"]
132+
115133
- it: multi-namespace mode
116134
set:
117135
sync:

pkg/controllers/resources/pods/syncer.go

Lines changed: 92 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,15 +2,16 @@ package pods
22

33
import (
44
"context"
5+
"encoding/json"
56
"fmt"
67
"reflect"
78
"slices"
89
"time"
910

1011
nodev1 "k8s.io/api/node/v1"
1112
schedulingv1 "k8s.io/api/scheduling/v1"
12-
1313
utilerrors "k8s.io/apimachinery/pkg/util/errors"
14+
utilversion "k8s.io/apimachinery/pkg/util/version"
1415
"k8s.io/apimachinery/pkg/util/wait"
1516
"k8s.io/klog/v2"
1617

@@ -104,6 +105,17 @@ func New(ctx *synccontext.RegisterContext) (syncertypes.Object, error) {
104105
return nil, fmt.Errorf("failed to create scheduling config: %w", err)
105106
}
106107

108+
hostClusterVersionInfo, err := ctx.Config.HostClient.Discovery().ServerVersion()
109+
if err != nil {
110+
return nil, fmt.Errorf("failed to get virtual cluster version : %w", err)
111+
}
112+
113+
hostClusterVersion, err := utilversion.ParseSemantic(hostClusterVersionInfo.String())
114+
if err != nil {
115+
// This should never happen
116+
return nil, fmt.Errorf("failed to parse host cluster version : %w", err)
117+
}
118+
107119
return &podSyncer{
108120
GenericTranslator: genericTranslator,
109121
Importer: pro.NewImporter(podsMapper),
@@ -119,6 +131,8 @@ func New(ctx *synccontext.RegisterContext) (syncertypes.Object, error) {
119131
nodeSelector: nodeSelector,
120132
tolerations: tolerations,
121133

134+
hostClusterVersion: hostClusterVersion,
135+
122136
podSecurityStandard: ctx.Config.Policies.PodSecurityStandard,
123137
}, nil
124138
}
@@ -138,6 +152,8 @@ type podSyncer struct {
138152
nodeSelector *metav1.LabelSelector
139153
tolerations []*corev1.Toleration
140154

155+
hostClusterVersion *utilversion.Version
156+
141157
podSecurityStandard string
142158
}
143159

@@ -392,6 +408,12 @@ func (s *podSyncer) Sync(ctx *synccontext.SyncContext, event *synccontext.SyncEv
392408
}
393409
}()
394410

411+
// resize the host pod container resources in place if the pod spec has changed
412+
err = s.resizeHostPodContainerResourcesInPlace(ctx, event)
413+
if err != nil {
414+
return ctrl.Result{}, err
415+
}
416+
395417
// update the virtual pod if the spec has changed
396418
err = s.podTranslator.Diff(ctx, event)
397419
if err != nil {
@@ -405,6 +427,23 @@ func (s *podSyncer) Sync(ctx *synccontext.SyncContext, event *synccontext.SyncEv
405427
return ctrl.Result{}, nil
406428
}
407429

430+
func (s *podSyncer) resizeHostPodContainerResourcesInPlace(ctx *synccontext.SyncContext, event *synccontext.SyncEvent[*corev1.Pod]) error {
431+
if s.hostClusterVersion.LessThan(utilversion.MustParseSemantic("1.35.0")) {
432+
return nil
433+
}
434+
435+
resizePatch, err := buildHostPodContainersResourcesResizePatch(event.Virtual, event.Host)
436+
if err != nil {
437+
return err
438+
}
439+
if resizePatch != nil {
440+
if err := s.applyResizeSubresource(ctx, event.Host, resizePatch); err != nil {
441+
return err
442+
}
443+
}
444+
return nil
445+
}
446+
408447
func (s *podSyncer) SyncToVirtual(ctx *synccontext.SyncContext, event *synccontext.SyncToVirtualEvent[*corev1.Pod]) (_ ctrl.Result, retErr error) {
409448
if event.VirtualOld != nil || translate.ShouldDeleteHostObject(event.Host) {
410449
// virtual object is not here anymore, so we delete
@@ -445,6 +484,58 @@ func setSATokenSecretAsOwner(ctx *synccontext.SyncContext, pClient client.Client
445484
return nil
446485
}
447486

487+
// resizePatch is the minimal strategic-merge-patch payload accepted by the
// pods "resize" subresource: only spec.containers[*].{name,resources}.
type resizePatch struct {
	Spec resizePatchSpec `json:"spec"`
}

// resizePatchSpec carries the list of containers whose resources should be
// resized in place on the host pod.
type resizePatchSpec struct {
	Containers []resizeContainer `json:"containers"`
}

// resizeContainer pairs a container name (the strategic-merge-patch merge key
// for pod containers) with its desired resource requirements.
type resizeContainer struct {
	Name      string                      `json:"name"`
	Resources corev1.ResourceRequirements `json:"resources,omitempty"`
}
499+
500+
func buildHostPodContainersResourcesResizePatch(vPod, pPod *corev1.Pod) ([]byte, error) {
501+
hostContainers := map[string]corev1.Container{}
502+
for _, c := range pPod.Spec.Containers {
503+
hostContainers[c.Name] = c
504+
}
505+
506+
var patchContainers []resizeContainer
507+
for _, v := range vPod.Spec.Containers {
508+
p, ok := hostContainers[v.Name]
509+
if !ok {
510+
continue
511+
}
512+
if equality.Semantic.DeepEqual(p.Resources, v.Resources) {
513+
continue
514+
}
515+
516+
var resources corev1.ResourceRequirements
517+
patchContainers = append(patchContainers, resizeContainer{
518+
Name: v.Name,
519+
Resources: resources,
520+
})
521+
}
522+
523+
if len(patchContainers) == 0 {
524+
return nil, nil
525+
}
526+
527+
// TODO: Improve this to potentially integrate pod level resource requests and limits inplace resize when it wil be in GA
528+
return json.Marshal(resizePatch{
529+
Spec: resizePatchSpec{
530+
Containers: patchContainers,
531+
},
532+
})
533+
}
534+
535+
// applyResizeSubresource sends the given strategic merge patch to the host
// pod's "resize" subresource, the only endpoint that allows mutating a running
// pod's container resources in place (Kubernetes 1.35+).
func (s *podSyncer) applyResizeSubresource(ctx *synccontext.SyncContext, hostPod *corev1.Pod, patch []byte) error {
	return ctx.HostClient.SubResource("resize").Patch(ctx, hostPod, client.RawPatch(types.StrategicMergePatchType, patch))
}
538+
448539
func (s *podSyncer) ensureNode(ctx *synccontext.SyncContext, pObj *corev1.Pod, vObj *corev1.Pod) (bool, error) {
449540
if vObj.Spec.NodeName != pObj.Spec.NodeName && vObj.Spec.NodeName != "" {
450541
// node of virtual and physical pod are different, we delete the virtual pod to try to recover from this state

pkg/controllers/resources/pods/syncer_test.go

Lines changed: 116 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,13 +1,15 @@
11
package pods
22

33
import (
4+
"encoding/json"
45
"fmt"
56
"maps"
67
"testing"
78

89
"gotest.tools/assert"
910
corev1 "k8s.io/api/core/v1"
1011
schedulingv1 "k8s.io/api/scheduling/v1"
12+
"k8s.io/apimachinery/pkg/api/resource"
1113
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
1214
"k8s.io/apimachinery/pkg/runtime"
1315
"k8s.io/apimachinery/pkg/runtime/schema"
@@ -691,3 +693,117 @@ func TestSync(t *testing.T) {
691693
},
692694
})
693695
}
696+
697+
// TestBuildResizePatch covers buildHostPodContainersResourcesResizePatch:
// a patch is produced when resources differ, nil when they match, and only
// diverging containers that exist on the host are included.
func TestBuildResizePatch(t *testing.T) {
	// makePod builds a single-container pod whose memory request/limit is
	// parameterized, so virtual and host specs can be made to diverge.
	makePod := func(memory string) *corev1.Pod {
		return &corev1.Pod{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "test-pod",
				Namespace: "default",
			},
			Spec: corev1.PodSpec{
				Containers: []corev1.Container{
					{
						Name:  "c1",
						Image: "nginx",
						Resources: corev1.ResourceRequirements{
							Requests: corev1.ResourceList{
								corev1.ResourceCPU:    resource.MustParse("100m"),
								corev1.ResourceMemory: resource.MustParse(memory),
							},
							Limits: corev1.ResourceList{
								corev1.ResourceMemory: resource.MustParse(memory),
							},
						},
					},
				},
			},
		}
	}

	t.Run("creates patch when resources differ", func(t *testing.T) {
		vPod := makePod("30Mi")
		pPod := makePod("20Mi")

		patchBytes, err := buildHostPodContainersResourcesResizePatch(vPod, pPod)
		assert.NilError(t, err)
		assert.Assert(t, patchBytes != nil)

		// The patch must carry the virtual (desired) memory value, not the
		// host's current one and not a zero value.
		var patch resizePatch
		err = json.Unmarshal(patchBytes, &patch)
		assert.NilError(t, err)
		assert.Equal(t, len(patch.Spec.Containers), 1)
		assert.Equal(t, patch.Spec.Containers[0].Name, "c1")
		got := patch.Spec.Containers[0].Resources.Requests[corev1.ResourceMemory]
		assert.Assert(t, got.Cmp(resource.MustParse("30Mi")) == 0)
	})

	t.Run("returns nil when resources are equal", func(t *testing.T) {
		vPod := makePod("20Mi")
		pPod := makePod("20Mi")

		// Identical specs: no patch should be generated at all.
		patchBytes, err := buildHostPodContainersResourcesResizePatch(vPod, pPod)
		assert.NilError(t, err)
		assert.Assert(t, patchBytes == nil)
	})

	t.Run("includes only containers with resource diffs", func(t *testing.T) {
		// c1 diverges (30Mi vs 20Mi); c2 is identical on both sides and must
		// be excluded from the patch.
		vPod := makePod("30Mi")
		vPod.Spec.Containers = append(vPod.Spec.Containers, corev1.Container{
			Name:  "c2",
			Image: "busybox",
			Resources: corev1.ResourceRequirements{
				Requests: corev1.ResourceList{
					corev1.ResourceCPU:    resource.MustParse("50m"),
					corev1.ResourceMemory: resource.MustParse("10Mi"),
				},
			},
		})
		pPod := makePod("20Mi")
		pPod.Spec.Containers = append(pPod.Spec.Containers, corev1.Container{
			Name:  "c2",
			Image: "busybox",
			Resources: corev1.ResourceRequirements{
				Requests: corev1.ResourceList{
					corev1.ResourceCPU:    resource.MustParse("50m"),
					corev1.ResourceMemory: resource.MustParse("10Mi"),
				},
			},
		})

		patchBytes, err := buildHostPodContainersResourcesResizePatch(vPod, pPod)
		assert.NilError(t, err)
		assert.Assert(t, patchBytes != nil)

		var patch resizePatch
		err = json.Unmarshal(patchBytes, &patch)
		assert.NilError(t, err)
		assert.Equal(t, len(patch.Spec.Containers), 1)
		assert.Equal(t, patch.Spec.Containers[0].Name, "c1")
	})

	t.Run("skips containers missing on host", func(t *testing.T) {
		// c2 exists only in the virtual pod; it cannot be resized on the host
		// and must therefore not appear in the patch.
		vPod := makePod("30Mi")
		vPod.Spec.Containers = append(vPod.Spec.Containers, corev1.Container{
			Name:  "c2",
			Image: "busybox",
			Resources: corev1.ResourceRequirements{
				Requests: corev1.ResourceList{
					corev1.ResourceCPU:    resource.MustParse("50m"),
					corev1.ResourceMemory: resource.MustParse("10Mi"),
				},
			},
		})
		pPod := makePod("20Mi")

		patchBytes, err := buildHostPodContainersResourcesResizePatch(vPod, pPod)
		assert.NilError(t, err)
		assert.Assert(t, patchBytes != nil)

		var patch resizePatch
		err = json.Unmarshal(patchBytes, &patch)
		assert.NilError(t, err)
		assert.Equal(t, len(patch.Spec.Containers), 1)
		assert.Equal(t, patch.Spec.Containers[0].Name, "c1")
	})
}

0 commit comments

Comments
 (0)