@@ -2,15 +2,16 @@ package pods
 
 import (
 	"context"
+	"encoding/json"
 	"fmt"
 	"reflect"
 	"slices"
 	"time"
 
 	nodev1 "k8s.io/api/node/v1"
 	schedulingv1 "k8s.io/api/scheduling/v1"
-
 	utilerrors "k8s.io/apimachinery/pkg/util/errors"
+	utilversion "k8s.io/apimachinery/pkg/util/version"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/klog/v2"
 
@@ -104,6 +105,17 @@ func New(ctx *synccontext.RegisterContext) (syncertypes.Object, error) {
 		return nil, fmt.Errorf("failed to create scheduling config: %w", err)
 	}
 
+	hostClusterVersionInfo, err := ctx.Config.HostClient.Discovery().ServerVersion()
+	if err != nil {
+		return nil, fmt.Errorf("failed to get host cluster version: %w", err)
+	}
+
+	hostClusterVersion, err := utilversion.ParseSemantic(hostClusterVersionInfo.String())
+	if err != nil {
+		// This should never happen
+		return nil, fmt.Errorf("failed to parse host cluster version: %w", err)
+	}
+
 	return &podSyncer{
 		GenericTranslator: genericTranslator,
 		Importer:          pro.NewImporter(podsMapper),
@@ -119,6 +131,8 @@ func New(ctx *synccontext.RegisterContext) (syncertypes.Object, error) {
 		nodeSelector: nodeSelector,
 		tolerations:  tolerations,
 
+		hostClusterVersion: hostClusterVersion,
+
 		podSecurityStandard: ctx.Config.Policies.PodSecurityStandard,
 	}, nil
 }
@@ -138,6 +152,8 @@ type podSyncer struct {
 	nodeSelector *metav1.LabelSelector
 	tolerations  []*corev1.Toleration
 
+	hostClusterVersion *utilversion.Version
+
 	podSecurityStandard string
 }
 
@@ -396,6 +412,12 @@ func (s *podSyncer) Sync(ctx *synccontext.SyncContext, event *synccontext.SyncEv
 		}
 	}()
 
+	// resize the host pod container resources in place if the pod spec has changed
+	err = s.resizeHostPodContainerResourcesInPlace(ctx, event)
+	if err != nil {
+		return ctrl.Result{}, err
+	}
+
 	// update the virtual pod if the spec has changed
 	err = s.podTranslator.Diff(ctx, event)
 	if err != nil {
@@ -409,6 +431,23 @@ func (s *podSyncer) Sync(ctx *synccontext.SyncContext, event *synccontext.SyncEv
 	return ctrl.Result{}, nil
 }
 
+func (s *podSyncer) resizeHostPodContainerResourcesInPlace(ctx *synccontext.SyncContext, event *synccontext.SyncEvent[*corev1.Pod]) error {
+	if s.hostClusterVersion.LessThan(utilversion.MustParseSemantic("1.35.0")) {
+		return nil
+	}
+
+	resizePatch, err := buildHostPodContainersResourcesResizePatch(event.Virtual, event.Host)
+	if err != nil {
+		return err
+	}
+	if resizePatch != nil {
+		if err := s.applyResizeSubresource(ctx, event.Host, resizePatch); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
 func (s *podSyncer) SyncToVirtual(ctx *synccontext.SyncContext, event *synccontext.SyncToVirtualEvent[*corev1.Pod]) (_ ctrl.Result, retErr error) {
 	if event.VirtualOld != nil || translate.ShouldDeleteHostObject(event.Host) {
 		// virtual object is not here anymore, so we delete
@@ -449,6 +488,56 @@ func setSATokenSecretAsOwner(ctx *synccontext.SyncContext, pClient client.Client
 	return nil
 }
 
+type resizePatch struct {
+	Spec resizePatchSpec `json:"spec"`
+}
+
+type resizePatchSpec struct {
+	Containers []resizeContainer `json:"containers"`
+}
+
+type resizeContainer struct {
+	Name      string                      `json:"name"`
+	Resources corev1.ResourceRequirements `json:"resources,omitempty"`
+}
+
+func buildHostPodContainersResourcesResizePatch(vPod, pPod *corev1.Pod) ([]byte, error) {
+	hostContainers := map[string]corev1.Container{}
+	for _, c := range pPod.Spec.Containers {
+		hostContainers[c.Name] = c
+	}
+
+	var patchContainers []resizeContainer
+	for _, v := range vPod.Spec.Containers {
+		p, ok := hostContainers[v.Name]
+		if !ok {
+			continue
+		}
+		if equality.Semantic.DeepEqual(p.Resources, v.Resources) {
+			continue
+		}
+		patchContainers = append(patchContainers, resizeContainer{
+			Name:      v.Name,
+			Resources: v.Resources,
+		})
+	}
+
+	if len(patchContainers) == 0 {
+		return nil, nil
+	}
+
+	// TODO: Improve this to integrate pod-level resource requests and limits into in-place resize once that feature is GA
+	return json.Marshal(resizePatch{
+		Spec: resizePatchSpec{
+			Containers: patchContainers,
+		},
+	})
+}
+
+func (s *podSyncer) applyResizeSubresource(ctx *synccontext.SyncContext, hostPod *corev1.Pod, patch []byte) error {
+	return ctx.HostClient.SubResource("resize").Patch(ctx, hostPod, client.RawPatch(types.StrategicMergePatchType, patch))
+}
+
 func (s *podSyncer) ensureNode(ctx *synccontext.SyncContext, pObj *corev1.Pod, vObj *corev1.Pod) (bool, error) {
 	if vObj.Spec.NodeName != pObj.Spec.NodeName && vObj.Spec.NodeName != "" {
 		// node of virtual and physical pod are different, we delete the virtual pod to try to recover from this state