@@ -2,15 +2,16 @@ package pods
22
33import (
44 "context"
5+ "encoding/json"
56 "fmt"
67 "reflect"
78 "slices"
89 "time"
910
1011 nodev1 "k8s.io/api/node/v1"
1112 schedulingv1 "k8s.io/api/scheduling/v1"
12-
1313 utilerrors "k8s.io/apimachinery/pkg/util/errors"
14+ utilversion "k8s.io/apimachinery/pkg/util/version"
1415 "k8s.io/apimachinery/pkg/util/wait"
1516 "k8s.io/klog/v2"
1617
@@ -104,6 +105,17 @@ func New(ctx *synccontext.RegisterContext) (syncertypes.Object, error) {
104105 return nil , fmt .Errorf ("failed to create scheduling config: %w" , err )
105106 }
106107
108+ hostClusterVersionInfo , err := ctx .Config .HostClient .Discovery ().ServerVersion ()
109+ if err != nil {
110+ return nil , fmt .Errorf ("failed to get virtual cluster version : %w" , err )
111+ }
112+
113+ hostClusterVersion , err := utilversion .ParseSemantic (hostClusterVersionInfo .String ())
114+ if err != nil {
115+ // This should never happen
116+ return nil , fmt .Errorf ("failed to parse host cluster version : %w" , err )
117+ }
118+
107119 return & podSyncer {
108120 GenericTranslator : genericTranslator ,
109121 Importer : pro .NewImporter (podsMapper ),
@@ -119,6 +131,8 @@ func New(ctx *synccontext.RegisterContext) (syncertypes.Object, error) {
119131 nodeSelector : nodeSelector ,
120132 tolerations : tolerations ,
121133
134+ hostClusterVersion : hostClusterVersion ,
135+
122136 podSecurityStandard : ctx .Config .Policies .PodSecurityStandard ,
123137 }, nil
124138}
@@ -138,6 +152,8 @@ type podSyncer struct {
138152 nodeSelector * metav1.LabelSelector
139153 tolerations []* corev1.Toleration
140154
155+ hostClusterVersion * utilversion.Version
156+
141157 podSecurityStandard string
142158}
143159
@@ -392,6 +408,12 @@ func (s *podSyncer) Sync(ctx *synccontext.SyncContext, event *synccontext.SyncEv
392408 }
393409 }()
394410
411+ // resize the host pod container resources in place if the pod spec has changed
412+ err = s .resizeHostPodContainerResourcesInPlace (ctx , event )
413+ if err != nil {
414+ return ctrl.Result {}, err
415+ }
416+
395417 // update the virtual pod if the spec has changed
396418 err = s .podTranslator .Diff (ctx , event )
397419 if err != nil {
@@ -405,6 +427,23 @@ func (s *podSyncer) Sync(ctx *synccontext.SyncContext, event *synccontext.SyncEv
405427 return ctrl.Result {}, nil
406428}
407429
430+ func (s * podSyncer ) resizeHostPodContainerResourcesInPlace (ctx * synccontext.SyncContext , event * synccontext.SyncEvent [* corev1.Pod ]) error {
431+ if s .hostClusterVersion .LessThan (utilversion .MustParseSemantic ("1.35.0" )) {
432+ return nil
433+ }
434+
435+ resizePatch , err := buildHostPodContainersResourcesResizePatch (event .Virtual , event .Host )
436+ if err != nil {
437+ return err
438+ }
439+ if resizePatch != nil {
440+ if err := s .applyResizeSubresource (ctx , event .Host , resizePatch ); err != nil {
441+ return err
442+ }
443+ }
444+ return nil
445+ }
446+
408447func (s * podSyncer ) SyncToVirtual (ctx * synccontext.SyncContext , event * synccontext.SyncToVirtualEvent [* corev1.Pod ]) (_ ctrl.Result , retErr error ) {
409448 if event .VirtualOld != nil || translate .ShouldDeleteHostObject (event .Host ) {
410449 // virtual object is not here anymore, so we delete
@@ -445,6 +484,58 @@ func setSATokenSecretAsOwner(ctx *synccontext.SyncContext, pClient client.Client
445484 return nil
446485}
447486
// resizePatch is the JSON body sent to the pod "resize" subresource as a
// strategic merge patch.
type resizePatch struct {
	Spec resizePatchSpec `json:"spec"`
}

// resizePatchSpec mirrors the subset of the pod spec accepted by the resize
// subresource: just the list of containers whose resources should change.
type resizePatchSpec struct {
	Containers []resizeContainer `json:"containers"`
}

// resizeContainer pairs a container name with the resource requirements the
// resize subresource should apply to it.
type resizeContainer struct {
	Name      string                      `json:"name"`
	Resources corev1.ResourceRequirements `json:"resources,omitempty"`
}
499+
500+ func buildHostPodContainersResourcesResizePatch (vPod , pPod * corev1.Pod ) ([]byte , error ) {
501+ hostContainers := map [string ]corev1.Container {}
502+ for _ , c := range pPod .Spec .Containers {
503+ hostContainers [c .Name ] = c
504+ }
505+
506+ var patchContainers []resizeContainer
507+ for _ , v := range vPod .Spec .Containers {
508+ p , ok := hostContainers [v .Name ]
509+ if ! ok {
510+ continue
511+ }
512+ if equality .Semantic .DeepEqual (p .Resources , v .Resources ) {
513+ continue
514+ }
515+
516+ var resources corev1.ResourceRequirements
517+ patchContainers = append (patchContainers , resizeContainer {
518+ Name : v .Name ,
519+ Resources : resources ,
520+ })
521+ }
522+
523+ if len (patchContainers ) == 0 {
524+ return nil , nil
525+ }
526+
527+ // TODO: Improve this to potentially integrate pod level resource requests and limits inplace resize when it wil be in GA
528+ return json .Marshal (resizePatch {
529+ Spec : resizePatchSpec {
530+ Containers : patchContainers ,
531+ },
532+ })
533+ }
534+
// applyResizeSubresource submits the given strategic-merge patch to the host
// pod's "resize" subresource, letting the cluster adjust container resources
// in place without recreating the pod.
func (s *podSyncer) applyResizeSubresource(ctx *synccontext.SyncContext, hostPod *corev1.Pod, patch []byte) error {
	return ctx.HostClient.SubResource("resize").Patch(ctx, hostPod, client.RawPatch(types.StrategicMergePatchType, patch))
}
538+
448539func (s * podSyncer ) ensureNode (ctx * synccontext.SyncContext , pObj * corev1.Pod , vObj * corev1.Pod ) (bool , error ) {
449540 if vObj .Spec .NodeName != pObj .Spec .NodeName && vObj .Spec .NodeName != "" {
450541 // node of virtual and physical pod are different, we delete the virtual pod to try to recover from this state
0 commit comments