@@ -59,7 +59,6 @@ import (
5959
6060 bootstrapv1 "github.com/k0sproject/k0smotron/api/bootstrap/v1beta1"
6161 cpv1beta1 "github.com/k0sproject/k0smotron/api/controlplane/v1beta1"
62- kutil "github.com/k0sproject/k0smotron/internal/util"
6362)
6463
6564const (
@@ -448,7 +447,7 @@ func (c *K0sController) reconcileMachines(ctx context.Context, cluster *clusterv
448447 go func() {
449448 // The k8s API of the workload cluster must be available to make requests.
450449 if kcp.Status.Ready {
451- err = c.deleteOldControlNodes(ctx, cluster)
450+ err = c.deleteOldControlNodes(ctx, cluster, kcp)
452451 if err != nil {
453452 logger.Error(err, "Error deleting old control nodes")
454453 }
@@ -467,7 +466,7 @@ func (c *K0sController) reconcileMachines(ctx context.Context, cluster *clusterv
467466 }
468467 }
469468 } else {
470- kubeClient, err := c.getKubeClient(ctx, cluster)
469+ kubeClient, err := c.getKubeClient(ctx, cluster, kcp.Spec.K0sConfigSpec.Tunneling)
471470 if err != nil {
472471 return fmt.Errorf("error getting cluster client set for machine update: %w", err)
473472 }
@@ -485,7 +484,7 @@ func (c *K0sController) reconcileMachines(ctx context.Context, cluster *clusterv
485484 tooManyMachines ||
486485 isRecreateDeleteFirstPossible(kcp, clusterHasChanged, machineNamesToDelete, desiredMachines) {
487486 m := activeMachines.Newest().Name
488- err := c.checkMachineIsReady(ctx, m, cluster)
487+ err := c.checkMachineIsReady(ctx, m, cluster, kcp)
489488 if err != nil {
490489 logger.Error(err, "Error checking machine left", "machine", m)
491490 return err
@@ -527,7 +526,7 @@ func (c *K0sController) reconcileMachines(ctx context.Context, cluster *clusterv
527526 // machine to be ready It's not slowing down the process overall, as we wait to the first machine anyway to
528527 // create join tokens.
529528 if activeMachines.Len() >= 1 {
530- err := c.checkMachineIsReady(ctx, activeMachines.Newest().Name, cluster)
529+ err := c.checkMachineIsReady(ctx, activeMachines.Newest().Name, cluster, kcp)
531530 if err != nil {
532531 return err
533532 }
@@ -590,7 +589,7 @@ func (c *K0sController) deleteK0sNodeResources(ctx context.Context, cluster *clu
590589 logger := log.FromContext(ctx)
591590
592591 if kcp.Status.Ready {
593- kubeClient, err := c.getKubeClient(ctx, cluster)
592+ kubeClient, err := c.getKubeClient(ctx, cluster, kcp.Spec.K0sConfigSpec.Tunneling)
594593 if err != nil {
595594 return fmt.Errorf("error getting cluster client set for deletion: %w", err)
596595 }
@@ -656,8 +655,8 @@ func (c *K0sController) createBootstrapConfig(ctx context.Context, name string,
656655 return nil
657656}
658657
659- func (c *K0sController) checkMachineIsReady(ctx context.Context, machineName string, cluster *clusterv1.Cluster) error {
660- kubeClient, err := c.getKubeClient(ctx, cluster)
658+ func (c *K0sController) checkMachineIsReady(ctx context.Context, machineName string, cluster *clusterv1.Cluster, kcp *cpv1beta1.K0sControlPlane) error {
659+ kubeClient, err := c.getKubeClient(ctx, cluster, kcp.Spec.K0sConfigSpec.Tunneling)
661660 if err != nil {
662661 return fmt.Errorf("error getting cluster client set for machine update: %w", err)
663662 }
@@ -727,7 +726,7 @@ func (c *K0sController) reconcileConfig(ctx context.Context, cluster *clusterv1.
727726 }
728727
729728 // Reconcile the dynamic config
730- dErr := kutil.ReconcileDynamicConfig(ctx, cluster, c.Client, *kcp.Spec.K0sConfigSpec.K0s.DeepCopy())
729+ dErr := util.ReconcileDynamicConfig(ctx, cluster, c.Client, *kcp.Spec.K0sConfigSpec.K0s.DeepCopy(), &kcp.Spec.K0sConfigSpec.Tunneling)
731730 if dErr != nil {
732731 // Don't return error from dynamic config reconciliation, as it may not be created yet
733732 log.Error(fmt.Errorf("failed to reconcile dynamic config, kubeconfig may not be available yet: %w", dErr), "Failed to reconcile dynamic config")
0 commit comments