4 changes: 4 additions & 0 deletions chart/templates/role.yaml
@@ -40,6 +40,10 @@ rules:
resources: ["pods/status", "pods/ephemeralcontainers"]
verbs: ["patch", "update"]
{{- end }}
{{- if ge (.Capabilities.KubeVersion.Minor|int) 35 }}
- apiGroups: [""]
resources: ["pods/resize"]
{{- end }}
- apiGroups: ["apps"]
resources: ["statefulsets", "replicasets", "deployments"]
verbs: ["get", "list", "watch"]
91 changes: 90 additions & 1 deletion pkg/controllers/resources/pods/syncer.go
@@ -2,15 +2,16 @@ package pods

import (
"context"
"encoding/json"
"fmt"
"reflect"
"slices"
"time"

nodev1 "k8s.io/api/node/v1"
schedulingv1 "k8s.io/api/scheduling/v1"

utilerrors "k8s.io/apimachinery/pkg/util/errors"
utilversion "k8s.io/apimachinery/pkg/util/version"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/klog/v2"

@@ -104,6 +105,17 @@ func New(ctx *synccontext.RegisterContext) (syncertypes.Object, error) {
return nil, fmt.Errorf("failed to create scheduling config: %w", err)
}

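// Look up the host cluster version once at construction time; the syncer uses it to decide
// whether the host supports in-place pod resize via the pods/resize subresource.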
hostClusterVersionInfo, err := ctx.Config.HostClient.Discovery().ServerVersion()
if err != nil {
return nil, fmt.Errorf("failed to get virtual cluster version : %w", err)
}

hostClusterVersion, err := utilversion.ParseSemantic(hostClusterVersionInfo.String())
if err != nil {
// This should never happen
return nil, fmt.Errorf("failed to parse host cluster version : %w", err)
}

return &podSyncer{
GenericTranslator: genericTranslator,
Importer: pro.NewImporter(podsMapper),
@@ -119,6 +131,8 @@ func New(ctx *synccontext.RegisterContext) (syncertypes.Object, error) {
nodeSelector: nodeSelector,
tolerations: tolerations,

hostClusterVersion: hostClusterVersion,

podSecurityStandard: ctx.Config.Policies.PodSecurityStandard,
}, nil
}
Expand All @@ -138,6 +152,8 @@ type podSyncer struct {
nodeSelector *metav1.LabelSelector
tolerations []*corev1.Toleration

hostClusterVersion *utilversion.Version

podSecurityStandard string
}

@@ -396,6 +412,12 @@ func (s *podSyncer) Sync(ctx *synccontext.SyncContext, event *synccontext.SyncEv
}
}()

// resize the host pod's container resources in place if they differ from the virtual pod
err = s.resizeHostPodContainerResourcesInPlace(ctx, event)
if err != nil {
return ctrl.Result{}, err
}

// update the virtual pod if the spec has changed
err = s.podTranslator.Diff(ctx, event)
if err != nil {
@@ -409,6 +431,23 @@ func (s *podSyncer) Sync(ctx *synccontext.SyncContext, event *synccontext.SyncEv
return ctrl.Result{}, nil
}

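// resizeHostPodContainerResourcesInPlace applies container resource changes from the virtual pod to the
// host pod through the resize subresource, so the pod is resized in place instead of being recreated.
// It is a no-op when the host cluster is older than v1.35.0 or when no container resources differ.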
func (s *podSyncer) resizeHostPodContainerResourcesInPlace(ctx *synccontext.SyncContext, event *synccontext.SyncEvent[*corev1.Pod]) error {
if s.hostClusterVersion.LessThan(utilversion.MustParseSemantic("1.35.0")) {
return nil
}

resizePatch, err := buildHostPodContainersResourcesResizePatch(event.Virtual, event.Host)
if err != nil {
return err
}
if resizePatch != nil {
if err := s.applyResizeSubresource(ctx, event.Host, resizePatch); err != nil {
return err
}
}
return nil
}

func (s *podSyncer) SyncToVirtual(ctx *synccontext.SyncContext, event *synccontext.SyncToVirtualEvent[*corev1.Pod]) (_ ctrl.Result, retErr error) {
if event.VirtualOld != nil || translate.ShouldDeleteHostObject(event.Host) {
// virtual object is not here anymore, so we delete
@@ -449,6 +488,56 @@ func setSATokenSecretAsOwner(ctx *synccontext.SyncContext, pClient client.Client
return nil
}

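// resizePatch and its helper types mirror the minimal subset of the Pod spec that is sent to the
// resize subresource: only container names and their desired resources.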
type resizePatch struct {
Spec resizePatchSpec `json:"spec"`
}

type resizePatchSpec struct {
Containers []resizeContainer `json:"containers"`
}

type resizeContainer struct {
Name string `json:"name"`
Resources corev1.ResourceRequirements `json:"resources,omitempty"`
}

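// buildHostPodContainersResourcesResizePatch compares container resources between the virtual pod and the
// host pod and returns a strategic merge patch covering only the containers whose resources differ.
// Containers that exist only on the virtual pod are skipped; it returns nil if nothing needs resizing.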
func buildHostPodContainersResourcesResizePatch(vPod, pPod *corev1.Pod) ([]byte, error) {
hostContainers := map[string]corev1.Container{}
for _, c := range pPod.Spec.Containers {
hostContainers[c.Name] = c
}

var patchContainers []resizeContainer
for _, v := range vPod.Spec.Containers {
p, ok := hostContainers[v.Name]
if !ok {
continue
}
if equality.Semantic.DeepEqual(p.Resources, v.Resources) {
continue
}
patchContainers = append(patchContainers, resizeContainer{
Name: v.Name,
Resources: v.Resources,
})
}

if len(patchContainers) == 0 {
return nil, nil
}

// TODO: Integrate pod-level resource requests and limits into the in-place resize once that feature reaches GA
return json.Marshal(resizePatch{
Spec: resizePatchSpec{
Containers: patchContainers,
},
})
}

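// applyResizeSubresource sends the strategic merge patch to the host pod's resize subresource.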
func (s *podSyncer) applyResizeSubresource(ctx *synccontext.SyncContext, hostPod *corev1.Pod, patch []byte) error {
return ctx.HostClient.SubResource("resize").Patch(ctx, hostPod, client.RawPatch(types.StrategicMergePatchType, patch))
}

func (s *podSyncer) ensureNode(ctx *synccontext.SyncContext, pObj *corev1.Pod, vObj *corev1.Pod) (bool, error) {
if vObj.Spec.NodeName != pObj.Spec.NodeName && vObj.Spec.NodeName != "" {
// node of virtual and physical pod are different, we delete the virtual pod to try to recover from this state
116 changes: 116 additions & 0 deletions pkg/controllers/resources/pods/syncer_test.go
@@ -1,13 +1,15 @@
package pods

import (
"encoding/json"
"fmt"
"maps"
"testing"

"gotest.tools/assert"
corev1 "k8s.io/api/core/v1"
schedulingv1 "k8s.io/api/scheduling/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
@@ -691,3 +693,117 @@ func TestSync(t *testing.T) {
},
})
}

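// TestBuildResizePatch covers the main cases for buildHostPodContainersResourcesResizePatch: differing
// resources, equal resources, mixed containers, and containers missing on the host pod.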
func TestBuildResizePatch(t *testing.T) {
makePod := func(memory string) *corev1.Pod {
return &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "test-pod",
Namespace: "default",
},
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{
Name: "c1",
Image: "nginx",
Resources: corev1.ResourceRequirements{
Requests: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("100m"),
corev1.ResourceMemory: resource.MustParse(memory),
},
Limits: corev1.ResourceList{
corev1.ResourceMemory: resource.MustParse(memory),
},
},
},
},
},
}
}

t.Run("creates patch when resources differ", func(t *testing.T) {
vPod := makePod("30Mi")
pPod := makePod("20Mi")

patchBytes, err := buildHostPodContainersResourcesResizePatch(vPod, pPod)
assert.NilError(t, err)
assert.Assert(t, patchBytes != nil)

var patch resizePatch
err = json.Unmarshal(patchBytes, &patch)
assert.NilError(t, err)
assert.Equal(t, len(patch.Spec.Containers), 1)
assert.Equal(t, patch.Spec.Containers[0].Name, "c1")
got := patch.Spec.Containers[0].Resources.Requests[corev1.ResourceMemory]
assert.Assert(t, got.Cmp(resource.MustParse("30Mi")) == 0)
})

t.Run("returns nil when resources are equal", func(t *testing.T) {
vPod := makePod("20Mi")
pPod := makePod("20Mi")

patchBytes, err := buildHostPodContainersResourcesResizePatch(vPod, pPod)
assert.NilError(t, err)
assert.Assert(t, patchBytes == nil)
})

t.Run("includes only containers with resource diffs", func(t *testing.T) {
vPod := makePod("30Mi")
vPod.Spec.Containers = append(vPod.Spec.Containers, corev1.Container{
Name: "c2",
Image: "busybox",
Resources: corev1.ResourceRequirements{
Requests: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("50m"),
corev1.ResourceMemory: resource.MustParse("10Mi"),
},
},
})
pPod := makePod("20Mi")
pPod.Spec.Containers = append(pPod.Spec.Containers, corev1.Container{
Name: "c2",
Image: "busybox",
Resources: corev1.ResourceRequirements{
Requests: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("50m"),
corev1.ResourceMemory: resource.MustParse("10Mi"),
},
},
})

patchBytes, err := buildHostPodContainersResourcesResizePatch(vPod, pPod)
assert.NilError(t, err)
assert.Assert(t, patchBytes != nil)

var patch resizePatch
err = json.Unmarshal(patchBytes, &patch)
assert.NilError(t, err)
assert.Equal(t, len(patch.Spec.Containers), 1)
assert.Equal(t, patch.Spec.Containers[0].Name, "c1")
})

t.Run("skips containers missing on host", func(t *testing.T) {
vPod := makePod("30Mi")
vPod.Spec.Containers = append(vPod.Spec.Containers, corev1.Container{
Name: "c2",
Image: "busybox",
Resources: corev1.ResourceRequirements{
Requests: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("50m"),
corev1.ResourceMemory: resource.MustParse("10Mi"),
},
},
})
pPod := makePod("20Mi")

patchBytes, err := buildHostPodContainersResourcesResizePatch(vPod, pPod)
assert.NilError(t, err)
assert.Assert(t, patchBytes != nil)

var patch resizePatch
err = json.Unmarshal(patchBytes, &patch)
assert.NilError(t, err)
assert.Equal(t, len(patch.Spec.Containers), 1)
assert.Equal(t, patch.Spec.Containers[0].Name, "c1")
})
}