
Commit a7abd35

Merge pull request kubernetes-sigs#6684 from prometherion/refactor/docker-node
🌱 patching Docker-based nodes provider ID using client-runtime
2 parents 82d823b + 20a845d commit a7abd35
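
In short: the Docker infrastructure provider previously set a workload Node's spec.providerID by exec'ing kubectl inside the node container; with this change the DockerMachine and DockerMachinePool reconcilers obtain a cached workload-cluster client from a remote.ClusterCacheTracker and patch the Node object directly. The sketch below condenses the pattern the diff introduces; the helper name and the containerName/providerID parameters are illustrative stand-ins, not code from the diff.

package docker

import (
    "context"

    "github.com/pkg/errors"
    corev1 "k8s.io/api/core/v1"
    apimachinerytypes "k8s.io/apimachinery/pkg/types"
    "sigs.k8s.io/controller-runtime/pkg/client"

    "sigs.k8s.io/cluster-api/util/patch"
)

// setNodeProviderID is an illustrative condensation of the new flow: read the
// workload-cluster Node that backs the Docker container and patch its
// spec.providerID through a controller-runtime client. remoteClient is expected
// to come from (*remote.ClusterCacheTracker).GetClient for the owning Cluster.
func setNodeProviderID(ctx context.Context, remoteClient client.Client, containerName, providerID string) error {
    node := &corev1.Node{}
    if err := remoteClient.Get(ctx, apimachinerytypes.NamespacedName{Name: containerName}, node); err != nil {
        return errors.Wrap(err, "failed to retrieve node")
    }

    // The patch helper snapshots the object here and computes the patch when
    // Patch is called, so only the providerID change is sent to the API server.
    patchHelper, err := patch.NewHelper(node, remoteClient)
    if err != nil {
        return err
    }

    node.Spec.ProviderID = providerID

    return errors.Wrap(patchHelper.Patch(ctx, node), "failed to update providerID")
}
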

File tree

8 files changed: +68, -40 lines changed

test/infrastructure/docker/controllers/alias.go

Lines changed: 3 additions & 0 deletions
@@ -24,6 +24,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/controller"

+	"sigs.k8s.io/cluster-api/controllers/remote"
 	"sigs.k8s.io/cluster-api/test/infrastructure/container"
 	dockercontrollers "sigs.k8s.io/cluster-api/test/infrastructure/docker/internal/controllers"
 )
@@ -35,13 +36,15 @@ import (
 type DockerMachineReconciler struct {
 	Client           client.Client
 	ContainerRuntime container.Runtime
+	Tracker          *remote.ClusterCacheTracker
 }

 // SetupWithManager sets up the reconciler with the Manager.
 func (r *DockerMachineReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error {
 	return (&dockercontrollers.DockerMachineReconciler{
 		Client:           r.Client,
 		ContainerRuntime: r.ContainerRuntime,
+		Tracker:          r.Tracker,
 	}).SetupWithManager(ctx, mgr, options)
 }

test/infrastructure/docker/exp/controllers/alias.go

Lines changed: 3 additions & 0 deletions
@@ -25,6 +25,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/controller"

+	"sigs.k8s.io/cluster-api/controllers/remote"
 	"sigs.k8s.io/cluster-api/test/infrastructure/container"
 	dockermachinepoolcontrollers "sigs.k8s.io/cluster-api/test/infrastructure/docker/exp/internal/controllers"
 )
@@ -34,6 +35,7 @@ type DockerMachinePoolReconciler struct {
 	Client           client.Client
 	Scheme           *runtime.Scheme
 	ContainerRuntime container.Runtime
+	Tracker          *remote.ClusterCacheTracker
 }

 // SetupWithManager will add watches for this controller.
@@ -42,5 +44,6 @@ func (r *DockerMachinePoolReconciler) SetupWithManager(ctx context.Context, mgr
 		Client:           r.Client,
 		Scheme:           r.Scheme,
 		ContainerRuntime: r.ContainerRuntime,
+		Tracker:          r.Tracker,
 	}).SetupWithManager(ctx, mgr, options)
 }

test/infrastructure/docker/exp/internal/controllers/dockermachinepool_controller.go

Lines changed: 7 additions & 1 deletion
@@ -34,6 +34,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/source"

 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	"sigs.k8s.io/cluster-api/controllers/remote"
 	expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
 	utilexp "sigs.k8s.io/cluster-api/exp/util"
 	"sigs.k8s.io/cluster-api/test/infrastructure/container"
@@ -49,6 +50,7 @@ type DockerMachinePoolReconciler struct {
 	Client           client.Client
 	Scheme           *runtime.Scheme
 	ContainerRuntime container.Runtime
+	Tracker          *remote.ClusterCacheTracker
 }

 // +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=dockermachinepools,verbs=get;list;watch;create;update;patch;delete
@@ -186,7 +188,11 @@ func (r *DockerMachinePoolReconciler) reconcileNormal(ctx context.Context, clust
 	}

 	// Reconcile machines and updates Status.Instances
-	res, err := pool.ReconcileMachines(ctx)
+	remoteClient, err := r.Tracker.GetClient(ctx, client.ObjectKeyFromObject(cluster))
+	if err != nil {
+		return ctrl.Result{}, errors.Wrap(err, "failed to generate workload cluster client")
+	}
+	res, err := pool.ReconcileMachines(ctx, remoteClient)
 	if err != nil {
 		return res, err
 	}

test/infrastructure/docker/exp/internal/docker/nodepool.go

Lines changed: 4 additions & 4 deletions
@@ -79,7 +79,7 @@ func NewNodePool(ctx context.Context, c client.Client, cluster *clusterv1.Cluste
 // currently the nodepool supports only a recreate strategy for replacing old nodes with new ones
 // (all existing machines are killed before new ones are created).
 // TODO: consider if to support a Rollout strategy (a more progressive node replacement).
-func (np *NodePool) ReconcileMachines(ctx context.Context) (ctrl.Result, error) {
+func (np *NodePool) ReconcileMachines(ctx context.Context, remoteClient client.Client) (ctrl.Result, error) {
 	desiredReplicas := int(*np.machinePool.Spec.Replicas)

 	// Delete all the machines in excess (outdated machines or machines exceeding desired replica count).
@@ -140,7 +140,7 @@ func (np *NodePool) ReconcileMachines(ctx context.Context) (ctrl.Result, error)
 	result := ctrl.Result{}
 	for i := range np.machines {
 		machine := np.machines[i]
-		if res, err := np.reconcileMachine(ctx, machine); err != nil || !res.IsZero() {
+		if res, err := np.reconcileMachine(ctx, machine, remoteClient); err != nil || !res.IsZero() {
 			if err != nil {
 				return ctrl.Result{}, errors.Wrap(err, "failed to reconcile machine")
 			}
@@ -240,7 +240,7 @@ func (np *NodePool) refresh(ctx context.Context) error {
 }

 // reconcileMachine will build and provision a docker machine and update the docker machine pool status for that instance.
-func (np *NodePool) reconcileMachine(ctx context.Context, machine *docker.Machine) (ctrl.Result, error) {
+func (np *NodePool) reconcileMachine(ctx context.Context, machine *docker.Machine, remoteClient client.Client) (ctrl.Result, error) {
 	log := ctrl.LoggerFrom(ctx)

 	var machineStatus infraexpv1.DockerMachinePoolInstanceStatus
@@ -334,7 +334,7 @@ func (np *NodePool) reconcileMachine(ctx context.Context, machine *docker.Machin
 	// Usually a cloud provider will do this, but there is no docker-cloud provider.
 	// Requeue if there is an error, as this is likely momentary load balancer
 	// state changes during control plane provisioning.
-	if err := externalMachine.SetNodeProviderID(ctx); err != nil {
+	if err = externalMachine.SetNodeProviderID(ctx, remoteClient); err != nil {
 		log.V(4).Info("transient error setting the provider id")
 		return ctrl.Result{Requeue: true}, nil //nolint:nilerr
 	}

test/infrastructure/docker/internal/controllers/dockermachine_controller.go

Lines changed: 7 additions & 1 deletion
@@ -35,6 +35,7 @@ import (

 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
 	bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
+	"sigs.k8s.io/cluster-api/controllers/remote"
 	"sigs.k8s.io/cluster-api/test/infrastructure/container"
 	infrav1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1beta1"
 	"sigs.k8s.io/cluster-api/test/infrastructure/docker/internal/docker"
@@ -49,6 +50,7 @@ import (
 type DockerMachineReconciler struct {
 	client.Client
 	ContainerRuntime container.Runtime
+	Tracker          *remote.ClusterCacheTracker
 }

 // +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=dockermachines,verbs=get;list;watch;create;update;patch;delete
@@ -310,7 +312,11 @@ func (r *DockerMachineReconciler) reconcileNormal(ctx context.Context, cluster *
 	// Usually a cloud provider will do this, but there is no docker-cloud provider.
 	// Requeue if there is an error, as this is likely momentary load balancer
 	// state changes during control plane provisioning.
-	if err := externalMachine.SetNodeProviderID(ctx); err != nil {
+	remoteClient, err := r.Tracker.GetClient(ctx, client.ObjectKeyFromObject(cluster))
+	if err != nil {
+		return ctrl.Result{}, errors.Wrap(err, "failed to generate workload cluster client")
+	}
+	if err := externalMachine.SetNodeProviderID(ctx, remoteClient); err != nil {
 		if errors.As(err, &docker.ContainerNotRunningError{}) {
 			return ctrl.Result{}, errors.Wrap(err, "failed to patch the Kubernetes node with the machine providerID")
 		}

test/infrastructure/docker/internal/docker/machine.go

Lines changed: 18 additions & 13 deletions
@@ -28,8 +28,11 @@ import (

 	"github.com/go-logr/logr"
 	"github.com/pkg/errors"
+	corev1 "k8s.io/api/core/v1"
+	apimachinerytypes "k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/wait"
 	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/kind/pkg/apis/config/v1alpha4"
 	"sigs.k8s.io/kind/pkg/cluster/constants"

@@ -42,6 +45,7 @@ import (
 	"sigs.k8s.io/cluster-api/test/infrastructure/docker/internal/provisioning/cloudinit"
 	"sigs.k8s.io/cluster-api/test/infrastructure/docker/internal/provisioning/ignition"
 	clusterapicontainer "sigs.k8s.io/cluster-api/util/container"
+	"sigs.k8s.io/cluster-api/util/patch"
 )

 const (
@@ -381,7 +385,7 @@ func (m *Machine) CheckForBootstrapSuccess(ctx context.Context) error {
 }

 // SetNodeProviderID sets the docker provider ID for the kubernetes node.
-func (m *Machine) SetNodeProviderID(ctx context.Context) error {
+func (m *Machine) SetNodeProviderID(ctx context.Context, c client.Client) error {
 	log := ctrl.LoggerFrom(ctx)

 	kubectlNode, err := m.getKubectlNode(ctx)
@@ -392,20 +396,21 @@
 		return errors.Wrapf(ContainerNotRunningError{Name: kubectlNode.Name}, "unable to set NodeProviderID")
 	}

+	node := &corev1.Node{}
+	if err = c.Get(ctx, apimachinerytypes.NamespacedName{Name: m.ContainerName()}, node); err != nil {
+		return errors.Wrap(err, "failed to retrieve node")
+	}
+
 	log.Info("Setting Kubernetes node providerID")
-	patch := fmt.Sprintf(`{"spec": {"providerID": %q}}`, m.ProviderID())
-	cmd := kubectlNode.Commander.Command(
-		"kubectl",
-		"--kubeconfig", "/etc/kubernetes/kubelet.conf",
-		"patch",
-		"node", m.ContainerName(),
-		"--patch", patch,
-	)
-	lines, err := cmd.RunLoggingOutputOnFail(ctx)
+
+	patchHelper, err := patch.NewHelper(node, c)
 	if err != nil {
-		for _, line := range lines {
-			log.Info(line)
-		}
+		return err
+	}
+
+	node.Spec.ProviderID = m.ProviderID()
+
+	if err = patchHelper.Patch(ctx, node); err != nil {
 		return errors.Wrap(err, "failed update providerID")
 	}

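For readers unfamiliar with sigs.k8s.io/cluster-api/util/patch: for a single-field change like this, the helper's effect is roughly that of a controller-runtime merge patch computed from a snapshot of the object. The sketch below is an illustrative equivalent, not what the PR uses; the real helper additionally handles status and conditions.

package docker

import (
    "context"

    corev1 "k8s.io/api/core/v1"
    "sigs.k8s.io/controller-runtime/pkg/client"
)

// patchProviderID is an illustrative equivalent of what the patch helper ends up
// doing for this field: snapshot the Node, mutate it, and send a merge patch
// computed against the snapshot.
func patchProviderID(ctx context.Context, c client.Client, node *corev1.Node, providerID string) error {
    base := node.DeepCopy()
    node.Spec.ProviderID = providerID
    return c.Patch(ctx, node, client.MergeFrom(base))
}
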
test/infrastructure/docker/internal/docker/types/node.go

Lines changed: 0 additions & 18 deletions
@@ -18,8 +18,6 @@ limitations under the License.
 package types

 import (
-	"bufio"
-	"bytes"
 	"context"
 	"io"
 	"path/filepath"
@@ -164,22 +162,6 @@
 	stderr io.Writer
 }

-// RunLoggingOutputOnFail runs the cmd, logging error output if Run returns an error.
-func (c *ContainerCmd) RunLoggingOutputOnFail(ctx context.Context) ([]string, error) {
-	var buff bytes.Buffer
-	c.SetStdout(&buff)
-	c.SetStderr(&buff)
-	err := c.Run(ctx)
-	out := make([]string, 0)
-	if err != nil {
-		scanner := bufio.NewScanner(&buff)
-		for scanner.Scan() {
-			out = append(out, scanner.Text())
-		}
-	}
-	return out, errors.WithStack(err)
-}
-
 // Run will run a configured ContainerCmd inside a container instance.
 func (c *ContainerCmd) Run(ctx context.Context) error {
 	containerRuntime, err := container.RuntimeFrom(ctx)

test/infrastructure/docker/main.go

Lines changed: 26 additions & 3 deletions
@@ -185,9 +185,33 @@ func setupReconcilers(ctx context.Context, mgr ctrl.Manager) {
 		os.Exit(1)
 	}

+	log := ctrl.Log.WithName("remote").WithName("ClusterCacheTracker")
+	tracker, err := remote.NewClusterCacheTracker(
+		mgr,
+		remote.ClusterCacheTrackerOptions{
+			Log:     &log,
+			Indexes: remote.DefaultIndexes,
+		},
+	)
+	if err != nil {
+		setupLog.Error(err, "unable to create cluster cache tracker")
+		os.Exit(1)
+	}
+
+	if err := (&remote.ClusterCacheReconciler{
+		Client:  mgr.GetClient(),
+		Tracker: tracker,
+	}).SetupWithManager(ctx, mgr, controller.Options{
+		MaxConcurrentReconciles: concurrency,
+	}); err != nil {
+		setupLog.Error(err, "unable to create controller", "controller", "ClusterCacheReconciler")
+		os.Exit(1)
+	}
+
 	if err := (&controllers.DockerMachineReconciler{
 		Client:           mgr.GetClient(),
 		ContainerRuntime: runtimeClient,
+		Tracker:          tracker,
 	}).SetupWithManager(ctx, mgr, controller.Options{
 		MaxConcurrentReconciles: concurrency,
 	}); err != nil {
@@ -207,9 +231,8 @@ func setupReconcilers(ctx context.Context, mgr ctrl.Manager) {
 	if err := (&expcontrollers.DockerMachinePoolReconciler{
 		Client:           mgr.GetClient(),
 		ContainerRuntime: runtimeClient,
-	}).SetupWithManager(ctx, mgr, controller.Options{
-		MaxConcurrentReconciles: concurrency,
-	}); err != nil {
+		Tracker:          tracker,
+	}).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: concurrency}); err != nil {
 		setupLog.Error(err, "unable to create controller", "controller", "DockerMachinePool")
 		os.Exit(1)
 	}
