diff --git a/test/infrastructure/vcsim/README.md b/test/infrastructure/vcsim/README.md
index af5afb6689..aab88db438 100644
--- a/test/infrastructure/vcsim/README.md
+++ b/test/infrastructure/vcsim/README.md
@@ -69,7 +69,8 @@ source vcsim.env
 $ source vcsim.env
 
 # Then you are ready to create a workload cluster
-$ cat | envsubst | kubectl apply -f -
+$ make envsubst
+$ cat | hack/tools/bin/envsubst | kubectl apply -f -
 ```
 
 #### Using govc with vcsim
diff --git a/test/infrastructure/vcsim/api/v1alpha1/vmoperatordependencies_types_test.go b/test/infrastructure/vcsim/api/v1alpha1/vmoperatordependencies_types_test.go
new file mode 100644
index 0000000000..4917a6781d
--- /dev/null
+++ b/test/infrastructure/vcsim/api/v1alpha1/vmoperatordependencies_types_test.go
@@ -0,0 +1,84 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+	"fmt"
+	"testing"
+
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"sigs.k8s.io/yaml"
+)
+
+func TestVMOperatorDependencies_SetVCenterFromVCenterSimulator(t *testing.T) {
+	type fields struct {
+		TypeMeta   v1.TypeMeta
+		ObjectMeta v1.ObjectMeta
+		Spec       VMOperatorDependenciesSpec
+		Status     VMOperatorDependenciesStatus
+	}
+	type args struct {
+		vCenterSimulator *VCenterSimulator
+	}
+	tests := []struct {
+		name   string
+		fields fields
+		args   args
+	}{
+		{
+			args: args{
+				vCenterSimulator: &VCenterSimulator{
+					Status: VCenterSimulatorStatus{
+						Host:       "Host",
+						Username:   "Username",
+						Password:   "Password",
+						Thumbprint: "Thumbprint",
+					},
+				},
+			},
+			fields: fields{
+				TypeMeta: v1.TypeMeta{
+					Kind:       "VMOperatorDependencies",
+					APIVersion: GroupVersion.String(),
+				},
+				ObjectMeta: v1.ObjectMeta{
+					Name:      "name",
+					Namespace: "namespace",
+				},
+				Spec:   VMOperatorDependenciesSpec{},
+				Status: VMOperatorDependenciesStatus{},
+			},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			d := &VMOperatorDependencies{
+				TypeMeta:   tt.fields.TypeMeta,
+				ObjectMeta: tt.fields.ObjectMeta,
+				Spec:       tt.fields.Spec,
+				Status:     tt.fields.Status,
+			}
+			d.SetVCenterFromVCenterSimulator(tt.args.vCenterSimulator)
+
+			data, err := yaml.Marshal(d)
+			if err != nil {
+				t.Fatalf("failed to marshal VMOperatorDependencies: %v", err)
+			}
+			fmt.Println(string(data))
+		})
+	}
+}
diff --git a/test/infrastructure/vcsim/controllers/backends/containers/doc.go b/test/infrastructure/vcsim/controllers/backends/containers/doc.go
new file mode 100644
index 0000000000..6ef5ce0eed
--- /dev/null
+++ b/test/infrastructure/vcsim/controllers/backends/containers/doc.go
@@ -0,0 +1,19 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package containers implements the backend for ControlPlaneEndpoint and VirtualMachines using containers
+// running on a host machine (inspired by CAPD).
+package containers
diff --git a/test/infrastructure/vcsim/controllers/backends/controlplaneendpoint.go b/test/infrastructure/vcsim/controllers/backends/controlplaneendpoint.go
new file mode 100644
index 0000000000..db93d92ca0
--- /dev/null
+++ b/test/infrastructure/vcsim/controllers/backends/controlplaneendpoint.go
@@ -0,0 +1,30 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package backends + +import ( + "context" + + ctrl "sigs.k8s.io/controller-runtime" + + vcsimv1 "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/vcsim/api/v1alpha1" +) + +type ControlPlaneEndpointReconciler interface { + ReconcileNormal(ctx context.Context, controlPlaneEndpoint *vcsimv1.ControlPlaneEndpoint) (ctrl.Result, error) + ReconcileDelete(ctx context.Context, controlPlaneEndpoint *vcsimv1.ControlPlaneEndpoint) (ctrl.Result, error) +} diff --git a/test/infrastructure/vcsim/controllers/backends/inmemory/controlplaneendpoint.go b/test/infrastructure/vcsim/controllers/backends/inmemory/controlplaneendpoint.go new file mode 100644 index 0000000000..3e103079f0 --- /dev/null +++ b/test/infrastructure/vcsim/controllers/backends/inmemory/controlplaneendpoint.go @@ -0,0 +1,75 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package inmemory + +import ( + "context" + + "github.com/pkg/errors" + "k8s.io/klog/v2" + inmemoryruntime "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/pkg/runtime" + inmemoryserver "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/pkg/server" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + vcsimv1 "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/vcsim/api/v1alpha1" +) + +type ControlPlaneEndpointReconciler struct { + InMemoryManager inmemoryruntime.Manager + APIServerMux *inmemoryserver.WorkloadClustersMux + PodIP string +} + +func (r *ControlPlaneEndpointReconciler) ReconcileNormal(ctx context.Context, controlPlaneEndpoint *vcsimv1.ControlPlaneEndpoint) (ctrl.Result, error) { + log := ctrl.LoggerFrom(ctx) + log.Info("Reconciling VCSim ControlPlaneEndpoint") + + // Initialize a listener for the workload cluster. + // IMPORTANT: The fact that both the listener and the resourceGroup for a workload cluster have + // the same name is used as assumptions in other part of the implementation. 
+ listenerName := klog.KObj(controlPlaneEndpoint).String() + listener, err := r.APIServerMux.InitWorkloadClusterListener(listenerName) + if err != nil { + return ctrl.Result{}, errors.Wrapf(err, "failed to init the listener for the control plane endpoint") + } + + controlPlaneEndpoint.Status.Host = r.PodIP // NOTE: we are replacing the listener ip with the pod ip so it will be accessible from other pods as well + controlPlaneEndpoint.Status.Port = listener.Port() + + return ctrl.Result{}, nil +} + +func (r *ControlPlaneEndpointReconciler) ReconcileDelete(ctx context.Context, controlPlaneEndpoint *vcsimv1.ControlPlaneEndpoint) (ctrl.Result, error) { + log := ctrl.LoggerFrom(ctx) + log.Info("Reconciling delete VCSim ControlPlaneEndpoint") + listenerName := klog.KObj(controlPlaneEndpoint).String() + + // Delete the resource group hosting all the cloud resources belonging the workload cluster; + if resourceGroup, err := r.APIServerMux.ResourceGroupByWorkloadCluster(listenerName); err == nil { + r.InMemoryManager.DeleteResourceGroup(resourceGroup) + } + + // Delete the listener for the workload cluster; + if err := r.APIServerMux.DeleteWorkloadClusterListener(listenerName); err != nil { + return ctrl.Result{}, errors.Wrapf(err, "failed to delete the listener for the control plane endpoint") + } + + controllerutil.RemoveFinalizer(controlPlaneEndpoint, vcsimv1.ControlPlaneEndpointFinalizer) + + return ctrl.Result{}, nil +} diff --git a/test/infrastructure/vcsim/controllers/backends/inmemory/doc.go b/test/infrastructure/vcsim/controllers/backends/inmemory/doc.go new file mode 100644 index 0000000000..e9a95ac3e0 --- /dev/null +++ b/test/infrastructure/vcsim/controllers/backends/inmemory/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package inmemory implements backend for ControlPlaneEndpoint and VirtualMachines implemented using CAPI inmemory test utils. +package inmemory diff --git a/test/infrastructure/vcsim/controllers/vmbootstrap_controller.go b/test/infrastructure/vcsim/controllers/backends/inmemory/inmemorymachine_bootstrap.go similarity index 91% rename from test/infrastructure/vcsim/controllers/vmbootstrap_controller.go rename to test/infrastructure/vcsim/controllers/backends/inmemory/inmemorymachine_bootstrap.go index 9d469d65a7..c3b219c801 100644 --- a/test/infrastructure/vcsim/controllers/vmbootstrap_controller.go +++ b/test/infrastructure/vcsim/controllers/backends/inmemory/inmemorymachine_bootstrap.go @@ -1,5 +1,5 @@ /* -Copyright 2024 The Kubernetes Authors. +Copyright 2025 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package controllers +package inmemory import ( "context" @@ -69,7 +69,7 @@ const ( ) var ( // TODO: make this configurable - nodeStartupDuration = 10 * time.Second + NodeStartupDuration = 2 * time.Second nodeStartupJitter = 0.3 ) @@ -82,7 +82,7 @@ const ( ) var ( // TODO: make this configurable - etcdStartupDuration = 10 * time.Second + etcdStartupDuration = 2 * time.Second etcdStartupJitter = 0.3 ) @@ -95,7 +95,7 @@ const ( ) var ( // TODO: make this configurable - apiServerStartupDuration = 10 * time.Second + apiServerStartupDuration = 2 * time.Second apiServerStartupJitter = 0.3 ) @@ -136,7 +136,7 @@ type ConditionsTracker interface { v1beta1conditions.Setter } -type vmBootstrapReconciler struct { +type inMemoryMachineBootstrapReconciler struct { Client client.Client InMemoryManager inmemoryruntime.Manager APIServerMux *inmemoryserver.WorkloadClustersMux @@ -145,7 +145,7 @@ type vmBootstrapReconciler struct { GetProviderID func() string } -func (r *vmBootstrapReconciler) reconcileBoostrap(ctx context.Context, cluster *clusterv1beta1.Cluster, machine *clusterv1beta1.Machine, conditionsTracker ConditionsTracker) (ctrl.Result, error) { +func (r *inMemoryMachineBootstrapReconciler) reconcileBoostrap(ctx context.Context, cluster *clusterv1beta1.Cluster, machine *clusterv1beta1.Machine, conditionsTracker ConditionsTracker) (ctrl.Result, error) { log := ctrl.LoggerFrom(ctx) if !v1beta1conditions.Has(conditionsTracker, VMProvisionedCondition) { @@ -202,11 +202,11 @@ func (r *vmBootstrapReconciler) reconcileBoostrap(ctx context.Context, cluster * return res, kerrors.NewAggregate(errs) } -func (r *vmBootstrapReconciler) reconcileBoostrapNode(ctx context.Context, cluster *clusterv1beta1.Cluster, machine *clusterv1beta1.Machine, conditionsTracker ConditionsTracker) (ctrl.Result, error) { +func (r *inMemoryMachineBootstrapReconciler) reconcileBoostrapNode(ctx context.Context, cluster *clusterv1beta1.Cluster, machine *clusterv1beta1.Machine, conditionsTracker ConditionsTracker) (ctrl.Result, error) { log := ctrl.LoggerFrom(ctx) nodeName := conditionsTracker.GetName() - provisioningDuration := nodeStartupDuration + provisioningDuration := NodeStartupDuration provisioningDuration += time.Duration(rand.Float64() * nodeStartupJitter * float64(provisioningDuration)) //nolint:gosec // Intentionally using a weak random number generator here. 
start := v1beta1conditions.Get(conditionsTracker, VMProvisionedCondition).LastTransitionTime @@ -287,7 +287,7 @@ func (r *vmBootstrapReconciler) reconcileBoostrapNode(ctx context.Context, clust return ctrl.Result{}, nil } -func (r *vmBootstrapReconciler) reconcileBoostrapETCD(ctx context.Context, cluster *clusterv1beta1.Cluster, machine *clusterv1beta1.Machine, conditionsTracker ConditionsTracker) (ctrl.Result, error) { +func (r *inMemoryMachineBootstrapReconciler) reconcileBoostrapETCD(ctx context.Context, cluster *clusterv1beta1.Cluster, machine *clusterv1beta1.Machine, conditionsTracker ConditionsTracker) (ctrl.Result, error) { log := ctrl.LoggerFrom(ctx) etcdMember := fmt.Sprintf("etcd-%s", conditionsTracker.GetName()) @@ -431,7 +431,7 @@ func (r *vmBootstrapReconciler) reconcileBoostrapETCD(ctx context.Context, clust return ctrl.Result{}, nil } -func (r *vmBootstrapReconciler) reconcileBoostrapAPIServer(ctx context.Context, cluster *clusterv1beta1.Cluster, machine *clusterv1beta1.Machine, conditionsTracker ConditionsTracker) (ctrl.Result, error) { +func (r *inMemoryMachineBootstrapReconciler) reconcileBoostrapAPIServer(ctx context.Context, cluster *clusterv1beta1.Cluster, machine *clusterv1beta1.Machine, conditionsTracker ConditionsTracker) (ctrl.Result, error) { log := ctrl.LoggerFrom(ctx) apiServer := fmt.Sprintf("kube-apiserver-%s", conditionsTracker.GetName()) @@ -540,7 +540,7 @@ func (r *vmBootstrapReconciler) reconcileBoostrapAPIServer(ctx context.Context, return ctrl.Result{}, nil } -func (r *vmBootstrapReconciler) reconcileBoostrapScheduler(ctx context.Context, cluster *clusterv1beta1.Cluster, machine *clusterv1beta1.Machine, conditionsTracker ConditionsTracker) (ctrl.Result, error) { +func (r *inMemoryMachineBootstrapReconciler) reconcileBoostrapScheduler(ctx context.Context, cluster *clusterv1beta1.Cluster, machine *clusterv1beta1.Machine, conditionsTracker ConditionsTracker) (ctrl.Result, error) { // No-op if the machine is not a control plane machine. if !util.IsControlPlaneMachine(machine) { return ctrl.Result{}, nil @@ -587,7 +587,7 @@ func (r *vmBootstrapReconciler) reconcileBoostrapScheduler(ctx context.Context, return ctrl.Result{}, nil } -func (r *vmBootstrapReconciler) reconcileBoostrapControllerManager(ctx context.Context, cluster *clusterv1beta1.Cluster, machine *clusterv1beta1.Machine, conditionsTracker ConditionsTracker) (ctrl.Result, error) { +func (r *inMemoryMachineBootstrapReconciler) reconcileBoostrapControllerManager(ctx context.Context, cluster *clusterv1beta1.Cluster, machine *clusterv1beta1.Machine, conditionsTracker ConditionsTracker) (ctrl.Result, error) { // No-op if the machine is not a control plane machine. if !util.IsControlPlaneMachine(machine) { return ctrl.Result{}, nil @@ -634,7 +634,7 @@ func (r *vmBootstrapReconciler) reconcileBoostrapControllerManager(ctx context.C return ctrl.Result{}, nil } -func (r *vmBootstrapReconciler) reconcileBoostrapKubeadmObjects(ctx context.Context, cluster *clusterv1beta1.Cluster, machine *clusterv1beta1.Machine, _ ConditionsTracker) (ctrl.Result, error) { +func (r *inMemoryMachineBootstrapReconciler) reconcileBoostrapKubeadmObjects(ctx context.Context, cluster *clusterv1beta1.Cluster, machine *clusterv1beta1.Machine, _ ConditionsTracker) (ctrl.Result, error) { // No-op if the machine is not a control plane machine. 
if !util.IsControlPlaneMachine(machine) { return ctrl.Result{}, nil @@ -701,7 +701,7 @@ func (r *vmBootstrapReconciler) reconcileBoostrapKubeadmObjects(ctx context.Cont return ctrl.Result{}, nil } -func (r *vmBootstrapReconciler) reconcileBoostrapKubeProxy(ctx context.Context, cluster *clusterv1beta1.Cluster, machine *clusterv1beta1.Machine, _ ConditionsTracker) (ctrl.Result, error) { +func (r *inMemoryMachineBootstrapReconciler) reconcileBoostrapKubeProxy(ctx context.Context, cluster *clusterv1beta1.Cluster, machine *clusterv1beta1.Machine, _ ConditionsTracker) (ctrl.Result, error) { // No-op if the machine is not a control plane machine. if !util.IsControlPlaneMachine(machine) { return ctrl.Result{}, nil @@ -747,7 +747,7 @@ func (r *vmBootstrapReconciler) reconcileBoostrapKubeProxy(ctx context.Context, return ctrl.Result{}, nil } -func (r *vmBootstrapReconciler) reconcileBoostrapCoredns(ctx context.Context, cluster *clusterv1beta1.Cluster, machine *clusterv1beta1.Machine, _ ConditionsTracker) (ctrl.Result, error) { +func (r *inMemoryMachineBootstrapReconciler) reconcileBoostrapCoredns(ctx context.Context, cluster *clusterv1beta1.Cluster, machine *clusterv1beta1.Machine, _ ConditionsTracker) (ctrl.Result, error) { // No-op if the machine is not a control plane machine. if !util.IsControlPlaneMachine(machine) { return ctrl.Result{}, nil @@ -810,7 +810,7 @@ func (r *vmBootstrapReconciler) reconcileBoostrapCoredns(ctx context.Context, cl return ctrl.Result{}, nil } -func (r *vmBootstrapReconciler) reconcileDelete(ctx context.Context, cluster *clusterv1beta1.Cluster, machine *clusterv1beta1.Machine, conditionsTracker ConditionsTracker) (ctrl.Result, error) { +func (r *inMemoryMachineBootstrapReconciler) reconcileDelete(ctx context.Context, cluster *clusterv1beta1.Cluster, machine *clusterv1beta1.Machine, conditionsTracker ConditionsTracker) (ctrl.Result, error) { // Call the inner reconciliation methods. phases := []func(ctx context.Context, cluster *clusterv1beta1.Cluster, machine *clusterv1beta1.Machine, conditionsTracker ConditionsTracker) (ctrl.Result, error){ r.reconcileDeleteNode, @@ -839,7 +839,7 @@ func (r *vmBootstrapReconciler) reconcileDelete(ctx context.Context, cluster *cl return res, kerrors.NewAggregate(errs) } -func (r *vmBootstrapReconciler) reconcileDeleteNode(ctx context.Context, cluster *clusterv1beta1.Cluster, _ *clusterv1beta1.Machine, conditionsTracker ConditionsTracker) (ctrl.Result, error) { +func (r *inMemoryMachineBootstrapReconciler) reconcileDeleteNode(ctx context.Context, cluster *clusterv1beta1.Cluster, _ *clusterv1beta1.Machine, conditionsTracker ConditionsTracker) (ctrl.Result, error) { // Compute the resource group unique name. resourceGroup := klog.KObj(cluster).String() inmemoryClient := r.InMemoryManager.GetResourceGroup(resourceGroup).GetClient() @@ -859,7 +859,7 @@ func (r *vmBootstrapReconciler) reconcileDeleteNode(ctx context.Context, cluster return ctrl.Result{}, nil } -func (r *vmBootstrapReconciler) reconcileDeleteETCD(ctx context.Context, cluster *clusterv1beta1.Cluster, machine *clusterv1beta1.Machine, conditionsTracker ConditionsTracker) (ctrl.Result, error) { +func (r *inMemoryMachineBootstrapReconciler) reconcileDeleteETCD(ctx context.Context, cluster *clusterv1beta1.Cluster, machine *clusterv1beta1.Machine, conditionsTracker ConditionsTracker) (ctrl.Result, error) { // No-op if the machine is not a control plane machine. 
if !util.IsControlPlaneMachine(machine) { return ctrl.Result{}, nil @@ -896,7 +896,7 @@ func (r *vmBootstrapReconciler) reconcileDeleteETCD(ctx context.Context, cluster return ctrl.Result{}, nil } -func (r *vmBootstrapReconciler) reconcileDeleteAPIServer(ctx context.Context, cluster *clusterv1beta1.Cluster, machine *clusterv1beta1.Machine, conditionsTracker ConditionsTracker) (ctrl.Result, error) { +func (r *inMemoryMachineBootstrapReconciler) reconcileDeleteAPIServer(ctx context.Context, cluster *clusterv1beta1.Cluster, machine *clusterv1beta1.Machine, conditionsTracker ConditionsTracker) (ctrl.Result, error) { // No-op if the machine is not a control plane machine. if !util.IsControlPlaneMachine(machine) { return ctrl.Result{}, nil @@ -928,7 +928,7 @@ func (r *vmBootstrapReconciler) reconcileDeleteAPIServer(ctx context.Context, cl return ctrl.Result{}, nil } -func (r *vmBootstrapReconciler) reconcileDeleteScheduler(ctx context.Context, cluster *clusterv1beta1.Cluster, machine *clusterv1beta1.Machine, conditionsTracker ConditionsTracker) (ctrl.Result, error) { +func (r *inMemoryMachineBootstrapReconciler) reconcileDeleteScheduler(ctx context.Context, cluster *clusterv1beta1.Cluster, machine *clusterv1beta1.Machine, conditionsTracker ConditionsTracker) (ctrl.Result, error) { // No-op if the machine is not a control plane machine. if !util.IsControlPlaneMachine(machine) { return ctrl.Result{}, nil @@ -951,7 +951,7 @@ func (r *vmBootstrapReconciler) reconcileDeleteScheduler(ctx context.Context, cl return ctrl.Result{}, nil } -func (r *vmBootstrapReconciler) reconcileDeleteControllerManager(ctx context.Context, cluster *clusterv1beta1.Cluster, machine *clusterv1beta1.Machine, conditionsTracker ConditionsTracker) (ctrl.Result, error) { +func (r *inMemoryMachineBootstrapReconciler) reconcileDeleteControllerManager(ctx context.Context, cluster *clusterv1beta1.Cluster, machine *clusterv1beta1.Machine, conditionsTracker ConditionsTracker) (ctrl.Result, error) { // No-op if the machine is not a control plane machine. if !util.IsControlPlaneMachine(machine) { return ctrl.Result{}, nil @@ -980,7 +980,7 @@ type etcdInfo struct { members sets.Set[string] } -func (r *vmBootstrapReconciler) getEtcdInfo(ctx context.Context, inmemoryClient inmemoryruntime.Client) (etcdInfo, error) { +func (r *inMemoryMachineBootstrapReconciler) getEtcdInfo(ctx context.Context, inmemoryClient inmemoryruntime.Client) (etcdInfo, error) { etcdPods := &corev1.PodList{} if err := inmemoryClient.List(ctx, etcdPods, client.InNamespace(metav1.NamespaceSystem), diff --git a/test/infrastructure/vcsim/controllers/backends/inmemory/virtualmachine.go b/test/infrastructure/vcsim/controllers/backends/inmemory/virtualmachine.go new file mode 100644 index 0000000000..69e3c44f1c --- /dev/null +++ b/test/infrastructure/vcsim/controllers/backends/inmemory/virtualmachine.go @@ -0,0 +1,219 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package inmemory + +import ( + "context" + + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + kerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/klog/v2" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + inmemoryruntime "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/pkg/runtime" + inmemoryclient "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/pkg/runtime/client" + inmemoryserver "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/pkg/server" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1" + "sigs.k8s.io/cluster-api-provider-vsphere/pkg/session" + vcsimv1 "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/vcsim/api/v1alpha1" +) + +type VirtualMachineReconciler struct { + Client client.Client + InMemoryManager inmemoryruntime.Manager + InMemoryClient inmemoryclient.Client + APIServerMux *inmemoryserver.WorkloadClustersMux + + IsVMWaitingforIP func() bool + GetVCenterSession func(ctx context.Context) (*session.Session, error) + GetVMPath func() string + + IsVMReady func() bool + GetProviderID func() string +} + +func (r *VirtualMachineReconciler) ReconcileNormal(ctx context.Context, cluster *clusterv1beta1.Cluster, machine *clusterv1beta1.Machine, virtualMachine client.Object) (_ ctrl.Result, reterr error) { + resourceGroup := klog.KObj(cluster).String() + r.InMemoryManager.AddResourceGroup(resourceGroup) + r.InMemoryClient = r.InMemoryManager.GetResourceGroup(resourceGroup).GetClient() + + if result, err := r.reconcileNamespacesAndRegisterResourceGroup(ctx, cluster, resourceGroup); err != nil || !result.IsZero() { + return result, err + } + + conditionsTracker, err := r.getConditionTracker(ctx, virtualMachine) + if err != nil { + return ctrl.Result{}, err + } + // Always attempt to Patch the conditionsTracker object and status after each reconciliation. + defer func() { + // NOTE: Patch on conditionsTracker will only track of provisioning process of the fake node, etcd, api server, etc. + if err := r.InMemoryClient.Update(ctx, conditionsTracker); err != nil { + reterr = kerrors.NewAggregate([]error{reterr, err}) + } + }() + + ipReconciler := r.getVMIpReconciler() + if ret, err := ipReconciler.ReconcileIP(ctx); !ret.IsZero() || err != nil { + return ret, err + } + + bootstrapReconciler := r.getVMBootstrapReconciler() + if ret, err := bootstrapReconciler.reconcileBoostrap(ctx, cluster, machine, conditionsTracker); !ret.IsZero() || err != nil { + return ret, err + } + + return ctrl.Result{}, nil +} + +func (r *VirtualMachineReconciler) ReconcileDelete(ctx context.Context, cluster *clusterv1beta1.Cluster, machine *clusterv1beta1.Machine, virtualMachine client.Object) (_ ctrl.Result, reterr error) { + // Compute the resource group unique name. + resourceGroup := klog.KObj(cluster).String() + r.InMemoryManager.AddResourceGroup(resourceGroup) + r.InMemoryClient = r.InMemoryManager.GetResourceGroup(resourceGroup).GetClient() + + conditionsTracker, err := r.getConditionTracker(ctx, virtualMachine) + if err != nil { + return ctrl.Result{}, err + } + // Always attempt to Patch the conditionsTracker object and status after each reconciliation. + defer func() { + // NOTE: Patch on conditionsTracker will only track of provisioning process of the fake node, etcd, api server, etc. 
+ if err := r.InMemoryClient.Update(ctx, conditionsTracker); err != nil { + reterr = kerrors.NewAggregate([]error{reterr, err}) + } + }() + + bootstrapReconciler := r.getVMBootstrapReconciler() + if ret, err := bootstrapReconciler.reconcileDelete(ctx, cluster, machine, conditionsTracker); !ret.IsZero() || err != nil { + return ret, err + } + + controllerutil.RemoveFinalizer(virtualMachine, vcsimv1.VMFinalizer) + return ctrl.Result{}, nil +} + +func (r *VirtualMachineReconciler) reconcileNamespacesAndRegisterResourceGroup(ctx context.Context, cluster *clusterv1beta1.Cluster, resourceGroup string) (ctrl.Result, error) { + log := ctrl.LoggerFrom(ctx) + + // Create default Namespaces. + for _, nsName := range []string{metav1.NamespaceDefault, metav1.NamespacePublic, metav1.NamespaceSystem} { + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: nsName, + Labels: map[string]string{ + "kubernetes.io/metadata.name": nsName, + }, + }, + } + + if err := r.InMemoryClient.Get(ctx, client.ObjectKeyFromObject(ns), ns); err != nil { + if !apierrors.IsNotFound(err) { + return ctrl.Result{}, errors.Wrapf(err, "failed to get %s Namespace", nsName) + } + + if err := r.InMemoryClient.Create(ctx, ns); err != nil && !apierrors.IsAlreadyExists(err) { + return ctrl.Result{}, errors.Wrapf(err, "failed to create %s Namespace", nsName) + } + } + } + + // Registering ResourceGroup for ControlPlaneEndpoint + if _, err := r.APIServerMux.WorkloadClusterByResourceGroup(resourceGroup); err != nil { + l := &vcsimv1.ControlPlaneEndpointList{} + if err := r.Client.List(ctx, l); err != nil { + return ctrl.Result{}, err + } + found := false + for _, c := range l.Items { + c := c + if c.Status.Host != cluster.Spec.ControlPlaneEndpoint.Host || c.Status.Port != cluster.Spec.ControlPlaneEndpoint.Port { + continue + } + + listenerName := klog.KObj(&c).String() + log.Info("Registering ResourceGroup for ControlPlaneEndpoint", "ResourceGroup", resourceGroup, "ControlPlaneEndpoint", listenerName) + err := r.APIServerMux.RegisterResourceGroup(listenerName, resourceGroup) + if err != nil { + return ctrl.Result{}, err + } + found = true + break + } + if !found { + return ctrl.Result{}, errors.Errorf("unable to find a ControlPlaneEndpoint for host %s, port %d", cluster.Spec.ControlPlaneEndpoint.Host, cluster.Spec.ControlPlaneEndpoint.Port) + } + } + return ctrl.Result{}, nil +} + +func (r *VirtualMachineReconciler) getVMIpReconciler() *virtualMachineIPReconciler { + return &virtualMachineIPReconciler{ + Client: r.Client, + + // Type specific functions; those functions wraps the differences between govmomi and supervisor types, + // thus allowing to use the same virtualMachineIPReconciler in both scenarios. + GetVCenterSession: r.GetVCenterSession, + IsVMWaitingforIP: r.IsVMWaitingforIP, + GetVMPath: r.GetVMPath, + } +} + +func (r *VirtualMachineReconciler) getVMBootstrapReconciler() *inMemoryMachineBootstrapReconciler { + return &inMemoryMachineBootstrapReconciler{ + Client: r.Client, + InMemoryManager: r.InMemoryManager, + APIServerMux: r.APIServerMux, + + // Type specific functions; those functions wraps the differences between govmomi and supervisor types, + // thus allowing to use the same inMemoryMachineBootstrapReconciler in both scenarios. + IsVMReady: r.IsVMReady, + GetProviderID: r.GetProviderID, + } +} + +func (r *VirtualMachineReconciler) getConditionTracker(ctx context.Context, virtualMachine client.Object) (*infrav1.VSphereVM, error) { + // Check if there is a conditionsTracker in the resource group. 
+ // The conditionsTracker is an object stored in memory with the scope of storing conditions used for keeping + // track of the provisioning process of the fake node, etcd, api server, etc for this specific virtualMachine. + // (the process managed by this controller). + // NOTE: The type of the in memory conditionsTracker object doesn't matter as soon as it implements Cluster API's conditions interfaces. + // Unfortunately vmoprv1.VirtualMachine isn't a condition getter, so we fallback on using a infrav1.VSphereVM. + conditionsTracker := &infrav1.VSphereVM{} + if err := r.InMemoryClient.Get(ctx, client.ObjectKeyFromObject(virtualMachine), conditionsTracker); err != nil { + if !apierrors.IsNotFound(err) { + return nil, errors.Wrap(err, "failed to get conditionsTracker") + } + + conditionsTracker = &infrav1.VSphereVM{ + ObjectMeta: metav1.ObjectMeta{ + Name: virtualMachine.GetName(), + Namespace: virtualMachine.GetNamespace(), + }, + } + if err := r.InMemoryClient.Create(ctx, conditionsTracker); err != nil { + return nil, errors.Wrap(err, "failed to create conditionsTracker") + } + } + return conditionsTracker, nil +} diff --git a/test/infrastructure/vcsim/controllers/vmip_controller.go b/test/infrastructure/vcsim/controllers/backends/inmemory/virtualmachine_ip.go similarity index 96% rename from test/infrastructure/vcsim/controllers/vmip_controller.go rename to test/infrastructure/vcsim/controllers/backends/inmemory/virtualmachine_ip.go index de0b757736..52fe52f245 100644 --- a/test/infrastructure/vcsim/controllers/vmip_controller.go +++ b/test/infrastructure/vcsim/controllers/backends/inmemory/virtualmachine_ip.go @@ -1,5 +1,5 @@ /* -Copyright 2024 The Kubernetes Authors. +Copyright 2025 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package controllers +package inmemory import ( "context" @@ -30,7 +30,7 @@ import ( "sigs.k8s.io/cluster-api-provider-vsphere/pkg/session" ) -type vmIPReconciler struct { +type virtualMachineIPReconciler struct { Client client.Client IsVMWaitingforIP func() bool @@ -38,7 +38,7 @@ type vmIPReconciler struct { GetVMPath func() string } -func (r *vmIPReconciler) ReconcileIP(ctx context.Context) (ctrl.Result, error) { +func (r *virtualMachineIPReconciler) ReconcileIP(ctx context.Context) (ctrl.Result, error) { log := ctrl.LoggerFrom(ctx) // No op if f the VM is still provisioning, or it already has an IP, return. diff --git a/test/infrastructure/vcsim/controllers/backends/kubernetes/controlplane_pod.go b/test/infrastructure/vcsim/controllers/backends/kubernetes/controlplane_pod.go new file mode 100644 index 0000000000..5418c98c38 --- /dev/null +++ b/test/infrastructure/vcsim/controllers/backends/kubernetes/controlplane_pod.go @@ -0,0 +1,739 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package kubernetes + +import ( + "context" + "fmt" + "os" + "time" + + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + bootstrapv1 "sigs.k8s.io/cluster-api/api/bootstrap/kubeadm/v1beta2" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + "sigs.k8s.io/cluster-api/util/kubeconfig" + "sigs.k8s.io/cluster-api/util/secret" + "sigs.k8s.io/controller-runtime/pkg/client" + + vcsimv1 "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/vcsim/api/v1alpha1" +) + +const ( + serviceCIDR = "10.96.0.0/16" + podCIDR = "10.244.0.0/16" + dnsDomain = "cluster.local" +) + +// caSecretHandler implement handling for the secrets storing the control plane certificate authorities. +type caSecretHandler struct { + // TODO: in a follow up iteration we want to make it possible to store those objects in a dedicate ns on a separated cluster + // this brings in the limitation that objects for two clusters with the same name cannot be hosted in a single namespace as well as the need to rethink owner references. + client client.Client + + cluster *clusterv1beta1.Cluster + virtualMachine client.Object + virtualMachineGVK schema.GroupVersionKind +} + +func (ca *caSecretHandler) LookupOrGenerate(ctx context.Context) error { + certificates := secret.NewCertificatesForInitialControlPlane(&bootstrapv1.ClusterConfiguration{}) + + // Generate cluster certificates on the management cluster if not already there. + // Note: the code is taking care of service cleanup during the deletion workflow, + // so this controllerRef is mostly used to express a semantic relation. + controllerRef := metav1.NewControllerRef(ca.virtualMachine, ca.virtualMachineGVK) + if err := certificates.LookupOrGenerate(ctx, ca.client, client.ObjectKeyFromObject(ca.cluster), *controllerRef); err != nil { + return errors.Wrap(err, "failed to generate cluster certificates on the management cluster") + } + + // TODO: generate certificates on the backing cluster, they are required by generate files + + return nil +} + +// kubeConfigSecretHandler implement handling for the secret storing the cluster admin kubeconfig. +type kubeConfigSecretHandler struct { + // TODO: in a follow up iteration we want to make it possible to store those objects in a dedicate ns on a separated cluster + // this brings in the limitation that objects for two clusters with the same name cannot be hosted in a single namespace as well as the need to rethink owner references. + client client.Client + + cluster *clusterv1beta1.Cluster + virtualMachine client.Object + virtualMachineGVK schema.GroupVersionKind +} + +func (ca *kubeConfigSecretHandler) LookupOrGenerate(ctx context.Context) error { + // If the secret with the KubeConfig already exists, then no-op. + if k, _ := secret.GetFromNamespacedName(ctx, ca.client, client.ObjectKeyFromObject(ca.cluster), secret.Kubeconfig); k != nil { + return nil + } + + // Otherwise it is required to generate the secret storing the cluster admin kubeconfig. + // Note: the code is taking care of service cleanup during the deletion workflow, + // so this controllerRef is mostly used to express a semantic relation. 
+ controllerRef := metav1.NewControllerRef(ca.virtualMachine, ca.virtualMachineGVK) + if err := kubeconfig.CreateSecretWithOwner(ctx, ca.client, client.ObjectKeyFromObject(ca.cluster), ca.cluster.Spec.ControlPlaneEndpoint.String(), *controllerRef); err != nil { + return errors.Wrap(err, "failed to generate cluster certificates on the management cluster") + } + return nil +} + +// controlPlanePodHandler implement handling for the Pod implementing a control plane. +type controlPlanePodHandler struct { + // TODO: in a follow up iteration we want to make it possible to store those objects in a dedicate ns on a separated cluster + // this brings in the limitation that objects for two clusters with the same name cannot be hosted in a single namespace as well as the need to rethink owner references. + client client.Client + + controlPlaneEndpoint *vcsimv1.ControlPlaneEndpoint + cluster *clusterv1beta1.Cluster + virtualMachine client.Object + virtualMachineGVK schema.GroupVersionKind +} + +func (p *controlPlanePodHandler) LookupAndGenerateRBAC(ctx context.Context) error { + // TODO: think about cleanup or comment that cleanup of RBAC rules won't happen. + role := &rbacv1.Role{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: p.virtualMachine.GetNamespace(), + Name: "kubemark-control-plane", + }, + Rules: []rbacv1.PolicyRule{ + { + // TODO: consider if to restrict this somehow + Verbs: []string{"get"}, + APIGroups: []string{""}, // "" indicates the core API group + Resources: []string{"secrets"}, + }, + }, + } + if err := p.client.Get(ctx, client.ObjectKeyFromObject(role), role); err != nil { + switch { + case apierrors.IsNotFound(err): + if err := p.client.Create(ctx, role); err != nil { + return errors.Wrap(err, "failed to create kubemark-control-plane Role") + } + break + case apierrors.IsAlreadyExists(err): + break + default: + return errors.Wrap(err, "failed to get kubemark-control-plane Role") + } + } + roleBinding := &rbacv1.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: p.virtualMachine.GetNamespace(), + Name: "kubemark-control-plane", + }, + Subjects: []rbacv1.Subject{ + { + Kind: "User", + APIGroup: "rbac.authorization.k8s.io", + // TODO: create a service account and use it here instead of default + use it in the Pod + Name: "system:serviceaccount:default:default", + Namespace: p.virtualMachine.GetNamespace(), + }, + }, + RoleRef: rbacv1.RoleRef{ + APIGroup: "rbac.authorization.k8s.io", + Kind: "Role", + Name: "kubemark-control-plane", + }, + } + if err := p.client.Get(ctx, client.ObjectKeyFromObject(roleBinding), roleBinding); err != nil { + switch { + case apierrors.IsNotFound(err): + if err := p.client.Create(ctx, roleBinding); err != nil { + return errors.Wrap(err, "failed to create kubemark-control-plane RoleBinding") + } + break + case apierrors.IsAlreadyExists(err): + break + default: + return errors.Wrap(err, "failed to get kubemark-control-plane RoleBinding") + } + } + return nil +} + +func (p *controlPlanePodHandler) Generate(ctx context.Context, kubernetesVersion string) error { + // Gets info about the Pod is running the manager in. + managerPodNamespace := os.Getenv("POD_NAMESPACE") + managerPodName := os.Getenv("POD_NAME") + managerPodUID := types.UID(os.Getenv("POD_UID")) + + // Gets the Pod is running the manager in from the management cluster and validate it is the right one. 
managerPod := &corev1.Pod{}
+	managerPodKey := types.NamespacedName{Namespace: managerPodNamespace, Name: managerPodName}
+	if err := p.client.Get(ctx, managerPodKey, managerPod); err != nil {
+		return errors.Wrap(err, "failed to get manager pod")
+	}
+	if managerPod.UID != managerPodUID {
+		return errors.Errorf("manager pod UID does not match, expected %s, got %s", managerPodUID, managerPod.UID)
+	}
+
+	// Identify the container the manager is running in, so we can get the image currently in use for the manager.
+	var managerContainer *corev1.Container
+	for i := range managerPod.Spec.Containers {
+		c := managerPod.Spec.Containers[i]
+		if c.Name == "manager" {
+			managerContainer = &c
+		}
+	}
+
+	if managerContainer == nil {
+		return errors.New("failed to get container from manager pod")
+	}
+
+	// Generate the control plane Pod in the BackingCluster.
+	// TODO: think about ownerRef.
+	// Note: the code is taking care of service cleanup during the deletion workflow,
+	// so this ownerRef is mostly used to express a semantic relation.
+	pod := &corev1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace: p.virtualMachine.GetNamespace(),
+			// Kubernetes will generate a name with the cluster name as a prefix.
+			GenerateName: fmt.Sprintf("%s-control-plane-", p.virtualMachine.GetName()),
+			Labels: map[string]string{
+				// Following labels will be used to identify the control plane pods later on.
+				"control-plane-endpoint.vcsim.infrastructure.cluster.x-k8s.io": p.controlPlaneEndpoint.Name,
+
+				// Useful labels
+				clusterv1beta1.ClusterNameLabel:         p.cluster.Name,
+				clusterv1beta1.MachineControlPlaneLabel: "",
+			},
+		},
+		Spec: corev1.PodSpec{
+			InitContainers: []corev1.Container{
+				// Use an init container to generate all the key, certificates and KubeConfig files
+				// required for the control plane to run.
+				generateFilesContainer(managerContainer.Image, p.cluster.Name, p.cluster.Spec.ControlPlaneEndpoint.Host),
+			},
+			Containers: []corev1.Container{
+				// Stacked etcd member for this control plane instance.
+				etcdContainer(kubernetesVersion),
+				// The control plane instance.
+				// Note: control plane components are wired up in order to work well with immutable upgrades (each control plane instance is self-contained),
+				apiServerContainer(kubernetesVersion),
+				schedulerContainer(kubernetesVersion),
+				controllerManagerContainer(kubernetesVersion),
+				// Optionally add a debug container with a volume containing all the generated files.
+				// TODO: add the debug container conditionally, e.g. if there is an annotation on the virtual machine object.
+ // debugContainer(), + }, + PriorityClassName: "system-node-critical", + SecurityContext: &corev1.PodSecurityContext{ + SeccompProfile: &corev1.SeccompProfile{ + Type: "RuntimeDefault", + }, + }, + RestartPolicy: corev1.RestartPolicyAlways, + Volumes: []corev1.Volume{ + { + Name: "etcd-data", + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }, + }, + { + Name: "etc-kubernetes", + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }, + }, + }, + }, + } + + if err := p.client.Create(ctx, pod); err != nil { + return errors.Wrap(err, "failed to create control plane pod") + } + + // Wait for the pod to show up in the cache + if err := wait.PollUntilContextTimeout(ctx, 250*time.Millisecond, 5*time.Second, true, func(ctx context.Context) (bool, error) { + if err := p.client.Get(ctx, client.ObjectKeyFromObject(pod), pod); err != nil { + if apierrors.IsNotFound(err) { + return false, nil + } + return false, err + } + return true, nil + }); err != nil { + return errors.Wrap(err, "failed to get newly created control plane pod") + } + return nil +} + +func (p *controlPlanePodHandler) GetPods(ctx context.Context) (*corev1.PodList, error) { + options := []client.ListOption{ + client.InNamespace(p.virtualMachine.GetNamespace()), + client.MatchingLabels{ + "control-plane-endpoint.vcsim.infrastructure.cluster.x-k8s.io": p.controlPlaneEndpoint.GetName(), + }, + } + + // TODO: live client or wait for cache update ... + pods := &corev1.PodList{} + if err := p.client.List(ctx, pods, options...); err != nil { + return nil, errors.Wrap(err, "failed to list control plane pods") + } + return pods, nil +} + +func (p *controlPlanePodHandler) Delete(ctx context.Context, podName string) error { + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: p.virtualMachine.GetNamespace(), + Name: podName, + }, + } + if err := p.client.Delete(ctx, pod); err != nil { + return errors.Wrap(err, "failed to delete control plane pod") + } + return nil +} + +func generateFilesContainer(managerImage string, clusterName string, controlPaneEndPointHost string) corev1.Container { + c := corev1.Container{ + Name: "generate-files", + // Note: we are using the manager instead of another binary for convenience (the manager is already built and packaged + // into an image that is published during the release process). 
+ Image: managerImage, + ImagePullPolicy: corev1.PullIfNotPresent, + Command: []string{ + "/manager", + "--generate-control-plane-files", + }, + Env: []corev1.EnvVar{ + { + Name: "POD_NAMESPACE", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + APIVersion: "v1", + FieldPath: "metadata.namespace", + }, + }, + }, + { + Name: "POD_NAME", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + APIVersion: "v1", + FieldPath: "metadata.name", + }, + }, + }, + { + Name: "POD_IP", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + APIVersion: "v1", + FieldPath: "status.podIP", + }, + }, + }, + { + Name: "CLUSTER_NAME", + Value: clusterName, + }, + { + Name: "CONTROL_PLANE_ENDPOINT_HOST", + Value: controlPaneEndPointHost, + }, + }, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "etc-kubernetes", + MountPath: "/etc/kubernetes", + }, + }, + } + return c +} + +func etcdContainer(kubernetesVersion string) corev1.Container { + var etcdVersion string + // TODO: mirror map from kubeadm + switch kubernetesVersion { + default: + etcdVersion = "3.5.4-0" + } + + c := corev1.Container{ + Name: "etcd", + Image: fmt.Sprintf("registry.k8s.io/etcd:%s", etcdVersion), + ImagePullPolicy: corev1.PullIfNotPresent, + Env: []corev1.EnvVar{ + { + Name: "POD_NAME", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + APIVersion: "v1", + FieldPath: "metadata.name", + }, + }, + }, + { + Name: "POD_IP", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + APIVersion: "v1", + FieldPath: "status.podIP", + }, + }, + }, + }, + Command: []string{ + "etcd", + "--advertise-client-urls=https://$(POD_IP):2379", + "--cert-file=/etc/kubernetes/pki/etcd/server.crt", + "--client-cert-auth=true", + "--data-dir=/var/lib/etcd", + "--experimental-initial-corrupt-check=true", + "--experimental-watch-progress-notify-interval=5s", + "--initial-advertise-peer-urls=https://$(POD_IP):2380", + "--initial-cluster=$(POD_NAME)=https://$(POD_IP):2380", + "--key-file=/etc/kubernetes/pki/etcd/server.key", + "--listen-client-urls=https://127.0.0.1:2379,https://$(POD_IP):2379", + "--listen-metrics-urls=http://127.0.0.1:2381", + "--listen-peer-urls=https://$(POD_IP):2380", + "--name=$(POD_NAME)", + "--peer-cert-file=/etc/kubernetes/pki/etcd/peer.crt", + "--peer-client-cert-auth=true", + "--peer-key-file=/etc/kubernetes/pki/etcd/peer.key", + "--peer-trusted-ca-file=/etc/kubernetes/pki/etcd/ca.crt", + "--snapshot-count=10000", + "--trusted-ca-file=/etc/kubernetes/pki/etcd/ca.crt", + }, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("100m"), + corev1.ResourceMemory: resource.MustParse("100Mi"), + }, + }, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "etcd-data", + MountPath: "/var/lib/etcd", + }, + { + Name: "etc-kubernetes", + MountPath: "/etc/kubernetes", + }, + }, + Ports: []corev1.ContainerPort{ + { + Name: "etcd-peer", + ContainerPort: 2380, + }, + // TODO: check if we can drop this port + /* + { + Name: "etcd-client", + ContainerPort: 2379, + }, + */ + }, + // TODO: enable probes + /* + StartupProbe: &corev1.Probe{ + FailureThreshold: 24, + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/health?serializable=false", + Port: intstr.FromInt(2381), + Scheme: corev1.URISchemeHTTP, + }, + }, + InitialDelaySeconds: 10, + TimeoutSeconds: 15, + PeriodSeconds: 10, + }, + LivenessProbe: &corev1.Probe{ + FailureThreshold: 8, + 
ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/health?exclude=NOSPACE&serializable=true", + Port: intstr.FromInt(2381), + Scheme: corev1.URISchemeHTTP, + }, + }, + InitialDelaySeconds: 10, + TimeoutSeconds: 15, + PeriodSeconds: 10, + }, + */ + } + return c +} + +func apiServerContainer(kubernetesVersion string) corev1.Container { + c := corev1.Container{ + Name: "kube-apiserver", + Image: fmt.Sprintf("registry.k8s.io/kube-apiserver:%s", kubernetesVersion), + ImagePullPolicy: corev1.PullIfNotPresent, + Env: []corev1.EnvVar{ + { + Name: "POD_IP", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + APIVersion: "v1", + FieldPath: "status.podIP", + }, + }, + }, + }, + Command: []string{ + "kube-apiserver", + "--advertise-address=$(POD_IP)", + "--allow-privileged=true", + "--authorization-mode=Node,RBAC", + "--client-ca-file=/etc/kubernetes/pki/ca.crt", + "--enable-admission-plugins=NodeRestriction", + "--enable-bootstrap-token-auth=true", + "--etcd-cafile=/etc/kubernetes/pki/etcd/ca.crt", + "--etcd-certfile=/etc/kubernetes/pki/apiserver-etcd-client.crt", + "--etcd-keyfile=/etc/kubernetes/pki/apiserver-etcd-client.key", + "--etcd-servers=https://127.0.0.1:2379", + "--kubelet-client-certificate=/etc/kubernetes/pki/apiserver-kubelet-client.crt", + "--kubelet-client-key=/etc/kubernetes/pki/apiserver-kubelet-client.key", + "--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname", + "--proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.crt", + "--proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client.key", + "--requestheader-allowed-names=front-proxy-client", + "--requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.crt", + "--requestheader-extra-headers-prefix=X-Remote-Extra-", + "--requestheader-group-headers=X-Remote-Group", + "--requestheader-username-headers=X-Remote-User", + "--runtime-config=", // TODO: What about this? 
+ "--secure-port=6443", + fmt.Sprintf("--service-account-issuer=https://kubernetes.default.svc.%s", dnsDomain), + "--service-account-key-file=/etc/kubernetes/pki/sa.pub", + "--service-account-signing-key-file=/etc/kubernetes/pki/sa.key", + fmt.Sprintf("--service-cluster-ip-range=%s", serviceCIDR), + "--tls-cert-file=/etc/kubernetes/pki/apiserver.crt", + "--tls-private-key-file=/etc/kubernetes/pki/apiserver.key", + }, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("250m"), + }, + }, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "etc-kubernetes", + MountPath: "/etc/kubernetes", + }, + }, + Ports: []corev1.ContainerPort{ + { + Name: "api-server", + ContainerPort: 6443, + }, + }, + // TODO: enable probes + /* + StartupProbe: &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/livez", + Port: intstr.FromInt(6443), + Scheme: corev1.URISchemeHTTPS, + }, + }, + InitialDelaySeconds: 10, + TimeoutSeconds: 15, + PeriodSeconds: 10, + }, + ReadinessProbe: &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/readyz", + Port: intstr.FromInt(6443), + Scheme: corev1.URISchemeHTTPS, + }, + }, + TimeoutSeconds: 15, + PeriodSeconds: 1, + }, + LivenessProbe: &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/livez", + Port: intstr.FromInt(6443), + Scheme: corev1.URISchemeHTTPS, + }, + }, + InitialDelaySeconds: 10, + TimeoutSeconds: 15, + PeriodSeconds: 10, + }, + */ + } + return c +} + +func schedulerContainer(kubernetesVersion string) corev1.Container { + c := corev1.Container{ + Name: "kube-scheduler", + Image: fmt.Sprintf("registry.k8s.io/kube-scheduler:%s", kubernetesVersion), + ImagePullPolicy: corev1.PullIfNotPresent, + Command: []string{ + "kube-scheduler", + "--authentication-kubeconfig=/etc/kubernetes/scheduler.conf", + "--authorization-kubeconfig=/etc/kubernetes/scheduler.conf", + "--bind-address=127.0.0.1", + "--kubeconfig=/etc/kubernetes/scheduler.conf", + "--leader-elect=true", + }, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("100m"), + }, + }, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "etc-kubernetes", + MountPath: "/etc/kubernetes", + }, + }, + // TODO: enable probes + /* + StartupProbe: &corev1.Probe{ + FailureThreshold: 24, + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/healthz", + Port: intstr.FromInt(10259), + Scheme: corev1.URISchemeHTTPS, + }, + }, + InitialDelaySeconds: 10, + TimeoutSeconds: 15, + PeriodSeconds: 10, + }, + LivenessProbe: &corev1.Probe{ + FailureThreshold: 8, + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/healthz", + Port: intstr.FromInt(10259), + Scheme: corev1.URISchemeHTTPS, + }, + }, + InitialDelaySeconds: 10, + TimeoutSeconds: 15, + PeriodSeconds: 10, + }, + */ + } + return c +} + +func controllerManagerContainer(kubernetesVersion string) corev1.Container { + c := corev1.Container{ + Name: "kube-controller-manager", + Image: fmt.Sprintf("registry.k8s.io/kube-controller-manager:%s", kubernetesVersion), + ImagePullPolicy: corev1.PullIfNotPresent, + Command: []string{ + "kube-controller-manager", + "--allocate-node-cidrs=true", + "--authentication-kubeconfig=/etc/kubernetes/controller-manager.conf", + "--authorization-kubeconfig=/etc/kubernetes/controller-manager.conf", + "--bind-address=127.0.0.1", + 
"--client-ca-file=/etc/kubernetes/pki/ca.crt", + fmt.Sprintf("--cluster-cidr=%s", podCIDR), + "--cluster-name=kubemark", + "--cluster-signing-cert-file=/etc/kubernetes/pki/ca.crt", + "--cluster-signing-key-file=/etc/kubernetes/pki/ca.key", + "--controllers=*,bootstrapsigner,tokencleaner", + "--enable-hostpath-provisioner=true", + "--kubeconfig=/etc/kubernetes/controller-manager.conf", + "--leader-elect=true", + "--requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.crt", + "--root-ca-file=/etc/kubernetes/pki/ca.crt", + "--service-account-private-key-file=/etc/kubernetes/pki/sa.key", + fmt.Sprintf("--service-cluster-ip-range=%s", serviceCIDR), + "--use-service-account-credentials=true", + }, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("200m"), + }, + }, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "etc-kubernetes", + MountPath: "/etc/kubernetes", + }, + }, + // TODO: enable probes + /* + StartupProbe: &corev1.Probe{ + FailureThreshold: 24, + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/healthz", + Port: intstr.FromInt(10257), + Scheme: corev1.URISchemeHTTPS, + }, + }, + InitialDelaySeconds: 10, + TimeoutSeconds: 15, + PeriodSeconds: 10, + }, + LivenessProbe: &corev1.Probe{ + FailureThreshold: 8, + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/healthz", + Port: intstr.FromInt(10257), + Scheme: corev1.URISchemeHTTPS, + }, + }, + InitialDelaySeconds: 10, + TimeoutSeconds: 15, + PeriodSeconds: 10, + }, + + */ + } + return c +} + +func debugContainer() corev1.Container { + debugContainer := corev1.Container{ + Name: "debug", + Image: "ubuntu", + ImagePullPolicy: corev1.PullIfNotPresent, + Command: []string{"sleep", "infinity"}, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "etc-kubernetes", + MountPath: "/etc/kubernetes", + }, + }, + } + return debugContainer +} diff --git a/test/infrastructure/vcsim/controllers/backends/kubernetes/controlplane_pod_files.go b/test/infrastructure/vcsim/controllers/backends/kubernetes/controlplane_pod_files.go new file mode 100644 index 0000000000..093574bd87 --- /dev/null +++ b/test/infrastructure/vcsim/controllers/backends/kubernetes/controlplane_pod_files.go @@ -0,0 +1,428 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubernetes + +import ( + "context" + "crypto/rsa" + "crypto/x509" + "fmt" + "net" + "os" + "path/filepath" + + "github.com/pkg/errors" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/client-go/tools/clientcmd/api" + "sigs.k8s.io/cluster-api/util/certs" + "sigs.k8s.io/cluster-api/util/secret" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// GenerateFiles generates control plane files for the current pod. 
+// The implementation assumes this code to be run as init container in the control plane pod; also +// it assume that secrets with cluster certificate authorities are mirrored in the backing cluster. +// Note: we are using the manager instead of another binary for convenience (the manager is already built and packaged +// into an image that is published during the release process). +func GenerateFiles(ctx context.Context, client client.Client) error { + log := ctrl.LoggerFrom(ctx) + + // Gets the info about current pod. + podNamespace := os.Getenv("POD_NAMESPACE") + podName := os.Getenv("POD_NAME") + podIP := os.Getenv("POD_IP") + + // Gets some additional info about the cluster. + clusterName := os.Getenv("CLUSTER_NAME") + controlPlaneEndpointHost := os.Getenv("CONTROL_PLANE_ENDPOINT_HOST") + clusterKey := types.NamespacedName{Namespace: podNamespace, Name: clusterName} + + log.Info("Generating files", "POD_NAME", podName, "POD_NAMESPACE", podNamespace, "POD_IP", podIP, "CLUSTER_NAME", clusterName, "CONTROL_PLANE_ENDPOINT_HOST", controlPlaneEndpointHost) + log.Info("Generating ca, apiserver, apiserver-kubelet-client certificate files") + + ca, err := getKeyCertPair(ctx, client, clusterKey, secret.ClusterCA) + if err != nil { + return err + } + + if err := ca.WriteCertAndKey("/etc/kubernetes/pki", "ca"); err != nil { + return err + } + + if err := ca.WriteNewCertAndKey(apiServerCertificateConfig(podName, podIP, controlPlaneEndpointHost), "/etc/kubernetes/pki", "apiserver"); err != nil { + return errors.Wrap(err, "failed to create API server") + } + + if err := ca.WriteNewCertAndKey(apiServerKubeletClientCertificateConfig(), "/etc/kubernetes/pki", "apiserver-kubelet-client"); err != nil { + return errors.Wrap(err, "failed to create API server kubelet client certificate") + } + + log.Info("Generating front-proxy-ca, front-proxy-client certificate files") + + frontProxyCA, err := getKeyCertPair(ctx, client, clusterKey, secret.FrontProxyCA) + if err != nil { + return err + } + + if err := frontProxyCA.WriteCertAndKey("/etc/kubernetes/pki", "front-proxy-ca"); err != nil { + return err + } + + if err := frontProxyCA.WriteNewCertAndKey(frontProxyClientCertificateConfig(), "/etc/kubernetes/pki", "front-proxy-client"); err != nil { + return errors.Wrap(err, "failed to create front proxy client certificate") + } + + log.Info("Generating sa key files") + + serviceAccountPrivateKey, serviceAccountPublicKey, err := getPrivatePublicKeyPair(ctx, client, clusterKey, secret.ServiceAccount) + if err != nil { + return err + } + + if err := os.WriteFile(filepath.Join("/etc/kubernetes/pki", "sa.key"), serviceAccountPrivateKey, os.FileMode(0600)); err != nil { + return err + } + + if err := os.WriteFile(filepath.Join("/etc/kubernetes/pki", "sa.pub"), serviceAccountPublicKey, os.FileMode(0600)); err != nil { + return err + } + + log.Info("Generating etcd ca, server, peer, apiserver-etcd-client certificate files") + + etcd, err := getKeyCertPair(ctx, client, clusterKey, secret.EtcdCA) + if err != nil { + return err + } + + if err := etcd.WriteCertAndKey("/etc/kubernetes/pki/etcd", "ca"); err != nil { + return err + } + + if err := etcd.WriteNewCertAndKey(etcdServerCertificateConfig(podName, podIP), "/etc/kubernetes/pki/etcd", "server"); err != nil { + return errors.Wrap(err, "failed to create etcd server certificate") + } + + if err := etcd.WriteNewCertAndKey(etcdPeerCertificateConfig(podName, podIP), "/etc/kubernetes/pki/etcd", "peer"); err != nil { + return errors.Wrap(err, "failed to create etcd peer 
certificate") + } + + if err := etcd.WriteNewCertAndKey(apiServerEtcdClientCertificateConfig(), "/etc/kubernetes/pki", "apiserver-etcd-client"); err != nil { + return errors.Wrap(err, "failed to create API server etcd client certificate") + } + + log.Info("Generating admin, scheduler, controller-manager kubeconfig files") + + schedulerClient, err := ca.NewCertAndKey(schedulerClientCertificateConfig()) + if err != nil { + return errors.Wrap(err, "failed to create scheduler client certificate") + } + + schedulerKubeConfig := api.Config{ + Clusters: map[string]*api.Cluster{ + clusterKey.Name: { + Server: "https://127.0.0.1:6443", + CertificateAuthorityData: certs.EncodeCertPEM(ca.cert), + }, + }, + Contexts: map[string]*api.Context{ + clusterKey.Name: { + Cluster: clusterKey.Name, + AuthInfo: "scheduler", + }, + }, + AuthInfos: map[string]*api.AuthInfo{ + "scheduler": { + ClientKeyData: certs.EncodePrivateKeyPEM(schedulerClient.key), + ClientCertificateData: certs.EncodeCertPEM(schedulerClient.cert), + }, + }, + CurrentContext: clusterKey.Name, + } + if err := clientcmd.WriteToFile(schedulerKubeConfig, "/etc/kubernetes/scheduler.conf"); err != nil { + return errors.Wrap(err, "failed to serialize scheduler kubeconfig") + } + + controllerManagerClient, err := ca.NewCertAndKey(controllerManagerClientCertificateConfig()) + if err != nil { + return errors.Wrap(err, "failed to create controller manager client certificate") + } + + controllerManagerKubeConfig := api.Config{ + Clusters: map[string]*api.Cluster{ + clusterKey.Name: { + Server: "https://127.0.0.1:6443", + CertificateAuthorityData: certs.EncodeCertPEM(ca.cert), + }, + }, + Contexts: map[string]*api.Context{ + clusterKey.Name: { + Cluster: clusterKey.Name, + AuthInfo: "controller-manager", + }, + }, + AuthInfos: map[string]*api.AuthInfo{ + "controller-manager": { + ClientKeyData: certs.EncodePrivateKeyPEM(controllerManagerClient.key), + ClientCertificateData: certs.EncodeCertPEM(controllerManagerClient.cert), + }, + }, + CurrentContext: clusterKey.Name, + } + if err := clientcmd.WriteToFile(controllerManagerKubeConfig, "/etc/kubernetes/controller-manager.conf"); err != nil { + return errors.Wrap(err, "failed to serialize scheduler kubeconfig") + } + + adminClient, err := ca.NewCertAndKey(adminClientCertificateConfig()) + if err != nil { + return errors.Wrap(err, "failed to create admin client certificate") + } + + adminKubeConfig := api.Config{ + Clusters: map[string]*api.Cluster{ + clusterKey.Name: { + Server: "https://127.0.0.1:6443", + CertificateAuthorityData: certs.EncodeCertPEM(ca.cert), + }, + }, + Contexts: map[string]*api.Context{ + clusterKey.Name: { + Cluster: clusterKey.Name, + AuthInfo: "admin", + }, + }, + AuthInfos: map[string]*api.AuthInfo{ + "controller-manager": { + ClientKeyData: certs.EncodePrivateKeyPEM(adminClient.key), + ClientCertificateData: certs.EncodeCertPEM(adminClient.cert), + }, + }, + CurrentContext: clusterKey.Name, + } + if err := clientcmd.WriteToFile(adminKubeConfig, "/etc/kubernetes/admin.conf"); err != nil { + return errors.Wrap(err, "failed to serialize admin kubeconfig") + } + + log.Info("All file generated!") + return nil +} + +type KeyCertPair struct { + key *rsa.PrivateKey + cert *x509.Certificate +} + +// NewCertAndKey creates new certificate and key by passing the certificate authority certificate and key. 
+func (kp *KeyCertPair) NewCertAndKey(config *certs.Config) (*KeyCertPair, error) {
+	key, err := certs.NewPrivateKey()
+	if err != nil {
+		return nil, errors.Wrap(err, "unable to create private key")
+	}
+
+	cert, err := config.NewSignedCert(key, kp.cert, kp.key)
+	if err != nil {
+		return nil, errors.Wrap(err, "unable to sign certificate")
+	}
+
+	return &KeyCertPair{
+		key:  key,
+		cert: cert,
+	}, nil
+}
+
+func (kp *KeyCertPair) WriteCertAndKey(path, name string) error {
+	if err := os.MkdirAll(path, os.FileMode(0755)); err != nil {
+		return err
+	}
+	if err := os.WriteFile(filepath.Join(path, fmt.Sprintf("%s.key", name)), certs.EncodePrivateKeyPEM(kp.key), os.FileMode(0600)); err != nil {
+		return err
+	}
+	if err := os.WriteFile(filepath.Join(path, fmt.Sprintf("%s.crt", name)), certs.EncodeCertPEM(kp.cert), os.FileMode(0600)); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (kp *KeyCertPair) WriteNewCertAndKey(config *certs.Config, path, name string) error {
+	newKP, err := kp.NewCertAndKey(config)
+	if err != nil {
+		return err
+	}
+	return newKP.WriteCertAndKey(path, name)
+}
+
+func getKeyCertPair(ctx context.Context, client client.Client, cluster types.NamespacedName, purpose secret.Purpose) (*KeyCertPair, error) {
+	certificates := secret.NewCertificatesForInitialControlPlane(nil)
+	if err := certificates.Lookup(ctx, client, cluster); err != nil {
+		return nil, errors.Wrap(err, "failed to lookup certificate secrets")
+	}
+
+	certificate := certificates.GetByPurpose(purpose)
+	if certificate == nil {
+		return nil, errors.Errorf("failed to lookup %s secret", purpose)
+	}
+
+	signer, err := certs.DecodePrivateKeyPEM(certificate.KeyPair.Key)
+	if err != nil {
+		return nil, errors.Errorf("failed to decode key from %s secret", purpose)
+	}
+	key, ok := signer.(*rsa.PrivateKey)
+	if !ok {
+		return nil, errors.Errorf("key from %s secret is not a valid rsa.PrivateKey", purpose)
+	}
+
+	cert, err := certs.DecodeCertPEM(certificate.KeyPair.Cert)
+	if err != nil {
+		return nil, errors.Errorf("failed to decode certificate from %s secret", purpose)
+	}
+
+	return &KeyCertPair{
+		key:  key,
+		cert: cert,
+	}, nil
+}
+
+func getPrivatePublicKeyPair(ctx context.Context, client client.Client, cluster types.NamespacedName, purpose secret.Purpose) (privateKey []byte, publicKey []byte, _ error) {
+	certificates := secret.NewCertificatesForInitialControlPlane(nil)
+	if err := certificates.Lookup(ctx, client, cluster); err != nil {
+		return nil, nil, errors.Wrap(err, "failed to lookup certificate secrets")
+	}
+
+	certificate := certificates.GetByPurpose(purpose)
+	if certificate == nil {
+		return nil, nil, errors.Errorf("failed to lookup %s secret", purpose)
+	}
+
+	return certificate.KeyPair.Key, certificate.KeyPair.Cert, nil
+}
+
+func apiServerCertificateConfig(podName, podIP, controlPlaneEndpointHost string) *certs.Config {
+	// Create AltNames with the default DNS names and IPs for the API server serving certificate.
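+	// The SANs cover the in-cluster kubernetes Service names, localhost, the pod name, the pod IP, and the
+	// control plane endpoint host (assumed to be the ClusterIP of the load balancer Service).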
+ altNames := &certs.AltNames{ + DNSNames: []string{ + "kubernetes", + "kubernetes.default", + "kubernetes.default.svc", + fmt.Sprintf("kubernetes.default.svc.%s", dnsDomain), + "localhost", + podName, + }, + IPs: []net.IP{ + net.IPv4(127, 0, 0, 1), + net.IPv6loopback, + net.ParseIP(podIP), + // Note: we assume this is always an in IP (the cluster service IP) + net.ParseIP(controlPlaneEndpointHost), + }, + } + + return &certs.Config{ + CommonName: "apiserver", + AltNames: *altNames, + Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + } +} + +func schedulerClientCertificateConfig() *certs.Config { + return &certs.Config{ + CommonName: "system:kube-scheduler", + Organization: []string{}, + Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, + } +} + +func controllerManagerClientCertificateConfig() *certs.Config { + return &certs.Config{ + CommonName: "system:kube-controller-manager", + Organization: []string{}, + Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, + } +} + +func adminClientCertificateConfig() *certs.Config { + return &certs.Config{ + CommonName: "admin", + Organization: []string{"system:masters"}, + Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, + } +} + +func apiServerEtcdClientCertificateConfig() *certs.Config { + return &certs.Config{ + CommonName: "apiserver-etcd-client", + Organization: []string{"system:masters"}, // TODO: check if we can drop + Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, + } +} + +func apiServerKubeletClientCertificateConfig() *certs.Config { + return &certs.Config{ + CommonName: "apiserver-kubelet-client", + Organization: []string{"system:masters"}, + Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, + } +} + +func frontProxyClientCertificateConfig() *certs.Config { + return &certs.Config{ + CommonName: "front-proxy-client", + Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, + } +} + +func etcdServerCertificateConfig(podName, podIP string) *certs.Config { + // create AltNames with defaults DNSNames, IPs. + altNames := certs.AltNames{ + DNSNames: []string{ + "localhost", + podName, + }, + IPs: []net.IP{ + net.IPv4(127, 0, 0, 1), + net.IPv6loopback, + net.ParseIP(podIP), + }, + } + + return &certs.Config{ + CommonName: podName, + AltNames: altNames, + Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}, + } +} + +func etcdPeerCertificateConfig(podName, podIP string) *certs.Config { + // create AltNames with defaults DNSNames, IPs. + altNames := certs.AltNames{ + DNSNames: []string{ + "localhost", + podName, + }, + IPs: []net.IP{ + net.IPv4(127, 0, 0, 1), + net.IPv6loopback, + net.ParseIP(podIP), + }, + } + + return &certs.Config{ + CommonName: podName, + AltNames: altNames, + Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}, + } +} diff --git a/test/infrastructure/vcsim/controllers/backends/kubernetes/controlplane_pod_test.go b/test/infrastructure/vcsim/controllers/backends/kubernetes/controlplane_pod_test.go new file mode 100644 index 0000000000..62956a191f --- /dev/null +++ b/test/infrastructure/vcsim/controllers/backends/kubernetes/controlplane_pod_test.go @@ -0,0 +1,35 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kubernetes
+
+import "testing"
+
+func TestCASecretsHandler(t *testing.T) {
+	// TODO
+}
+
+func TestKubeConfigSecretHandler(t *testing.T) {
+	// TODO
+}
+
+func TestControlPlanePodHandler(t *testing.T) {
+	// TODO
+}
+
+func TestGenerateFiles(t *testing.T) {
+	// TODO
+}
diff --git a/test/infrastructure/vcsim/controllers/backends/kubernetes/controlplane_service.go b/test/infrastructure/vcsim/controllers/backends/kubernetes/controlplane_service.go
new file mode 100644
index 0000000000..2d779ff083
--- /dev/null
+++ b/test/infrastructure/vcsim/controllers/backends/kubernetes/controlplane_service.go
@@ -0,0 +1,137 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kubernetes
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/pkg/errors"
+	corev1 "k8s.io/api/core/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/intstr"
+	"k8s.io/utils/pointer"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	vcsimv1 "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/vcsim/api/v1alpha1"
+)
+
+const (
+	apiServerPodPort = 6443
+	lbServicePort    = 6443
+)
+
+// lbServiceHandler implements handling for the Kubernetes Service acting as a load balancer in front of all the control plane instances.
+type lbServiceHandler struct {
+	// TODO: in a follow-up iteration we want to make it possible to store those objects in a dedicated namespace on a separate cluster;
+	// this brings in the limitation that objects for two clusters with the same name cannot be hosted in a single namespace, as well as the need to rethink owner references.
+	client client.Client
+
+	controlPlaneEndpoint *vcsimv1.ControlPlaneEndpoint
+}
+
+func (lb *lbServiceHandler) ObjectKey() client.ObjectKey {
+	return client.ObjectKey{
+		Namespace: lb.controlPlaneEndpoint.Namespace,
+		Name:      fmt.Sprintf("%s-lb", lb.controlPlaneEndpoint.Name),
+	}
+}
+
+func (lb *lbServiceHandler) LookupOrGenerate(ctx context.Context) (*corev1.Service, error) {
+	// Lookup the load balancer service.
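+	// If it is not found, generate a new one; this makes LookupOrGenerate idempotent across reconciliations.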
+	svc, err := lb.Lookup(ctx)
+	if err != nil {
+		return nil, err
+	}
+	if svc != nil {
+		return svc, nil
+	}
+	return lb.Generate(ctx)
+}
+
+func (lb *lbServiceHandler) Lookup(ctx context.Context) (*corev1.Service, error) {
+	key := lb.ObjectKey()
+	svc := &corev1.Service{}
+	if err := lb.client.Get(ctx, key, svc); err != nil {
+		if apierrors.IsNotFound(err) {
+			return nil, nil
+		}
+		return nil, errors.Wrapf(err, "failed to get load balancer service")
+	}
+	return svc, nil
+}
+
+func (lb *lbServiceHandler) Generate(ctx context.Context) (*corev1.Service, error) {
+	key := lb.ObjectKey()
+	svc := &corev1.Service{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      key.Name,
+			Namespace: key.Namespace,
+			// Note: the code is taking care of service cleanup during the deletion workflow,
+			// so this ownerRef is mostly used to express a semantic relation.
+			OwnerReferences: []metav1.OwnerReference{
+				{
+					APIVersion:         vcsimv1.GroupVersion.String(),
+					Kind:               "ControlPlaneEndpoint",
+					Name:               lb.controlPlaneEndpoint.Name,
+					UID:                lb.controlPlaneEndpoint.UID,
+					Controller:         pointer.Bool(true),
+					BlockOwnerDeletion: pointer.Bool(true),
+				},
+			},
+		},
+		Spec: corev1.ServiceSpec{
+			// This selector must match labels on apiServerPods.
+			Selector: map[string]string{
+				"control-plane-endpoint.vcsim.infrastructure.cluster.x-k8s.io": lb.controlPlaneEndpoint.Name,
+			},
+			// Currently we support only services of type ClusterIP.
+			Type: corev1.ServiceTypeClusterIP,
+			Ports: []corev1.ServicePort{
+				{
+					Port:       lbServicePort,
+					TargetPort: intstr.FromInt(apiServerPodPort),
+				},
+			},
+		},
+	}
+	if err := lb.client.Create(ctx, svc); err != nil {
+		if apierrors.IsAlreadyExists(err) {
+			return nil, err
+		}
+		return nil, errors.Wrapf(err, "failed to create load balancer service")
+	}
+	return svc, nil
+}
+
+func (lb *lbServiceHandler) Delete(ctx context.Context) error {
+	key := lb.ObjectKey()
+	svc := &corev1.Service{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      key.Name,
+			Namespace: key.Namespace,
+		},
+	}
+	if err := lb.client.Delete(ctx, svc); err != nil {
+		if apierrors.IsNotFound(err) {
+			return nil
+		}
+		return errors.Wrapf(err, "failed to delete load balancer service")
+	}
+	return nil
+}
diff --git a/test/infrastructure/vcsim/controllers/backends/kubernetes/controlplane_service_test.go b/test/infrastructure/vcsim/controllers/backends/kubernetes/controlplane_service_test.go
new file mode 100644
index 0000000000..b5afade5f6
--- /dev/null
+++ b/test/infrastructure/vcsim/controllers/backends/kubernetes/controlplane_service_test.go
@@ -0,0 +1,128 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kubernetes
+
+import (
+	"fmt"
+	"testing"
+
+	. "github.com/onsi/gomega"
+	corev1 "k8s.io/api/core/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/util/intstr"
+	"sigs.k8s.io/controller-runtime/pkg/client/fake"
+
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	vcsimv1 "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/vcsim/api/v1alpha1"
+)
+
+var (
+	testScheme = runtime.NewScheme()
+	ctx        = ctrl.SetupSignalHandler()
+)
+
+func init() {
+	_ = corev1.AddToScheme(testScheme)
+	_ = vcsimv1.AddToScheme(testScheme)
+}
+
+func TestLBServiceHandler(t *testing.T) {
+	t.Run("Test Generate, Lookup, Delete", func(t *testing.T) {
+		g := NewWithT(t)
+
+		lb := lbServiceHandler{
+			client: fake.NewClientBuilder().WithScheme(testScheme).Build(),
+			controlPlaneEndpoint: &vcsimv1.ControlPlaneEndpoint{
+				ObjectMeta: metav1.ObjectMeta{
+					Namespace: metav1.NamespaceDefault,
+					Name:      "test",
+				},
+			},
+		}
+
+		// Generate
+		svc1, err := lb.Generate(ctx)
+		g.Expect(err).ToNot(HaveOccurred())
+		g.Expect(svc1).ToNot(BeNil())
+
+		g.Expect(svc1.Name).To(Equal(fmt.Sprintf("%s-lb", lb.controlPlaneEndpoint.Name)))
+		g.Expect(svc1.Namespace).To(Equal(lb.controlPlaneEndpoint.Namespace))
+		g.Expect(svc1.OwnerReferences).To(HaveLen(1))
+		g.Expect(svc1.Spec.Type).To(Equal(corev1.ServiceTypeClusterIP))
+		g.Expect(svc1.Spec.Ports).To(ConsistOf(corev1.ServicePort{Port: lbServicePort, TargetPort: intstr.FromInt(apiServerPodPort)}))
+
+		// Fake ClusterIP address being assigned
+		patch := client.MergeFrom(svc1.DeepCopy())
+		svc1.Spec.ClusterIP = "1.2.3.4"
+		g.Expect(lb.client.Patch(ctx, svc1, patch)).To(Succeed())
+
+		// Lookup
+		svc2, err := lb.Lookup(ctx)
+		g.Expect(err).ToNot(HaveOccurred())
+		g.Expect(svc2).ToNot(BeNil())
+
+		g.Expect(svc2.Spec.ClusterIP).To(Equal("1.2.3.4"))
+
+		// Delete
+		err = lb.Delete(ctx)
+		g.Expect(err).ToNot(HaveOccurred())
+
+		svc3 := &corev1.Service{}
+		err = lb.client.Get(ctx, lb.ObjectKey(), svc3)
+		g.Expect(apierrors.IsNotFound(err)).To(BeTrue())
+	})
+	t.Run("Test LookupOrGenerate", func(t *testing.T) {
+		g := NewWithT(t)
+
+		lb := lbServiceHandler{
+			client: fake.NewClientBuilder().WithScheme(testScheme).Build(),
+			controlPlaneEndpoint: &vcsimv1.ControlPlaneEndpoint{
+				ObjectMeta: metav1.ObjectMeta{
+					Namespace: metav1.NamespaceDefault,
+					Name:      "test",
+				},
+			},
+		}
+
+		// LookupOrGenerate must create the service if it is not already there
+		svc1, err := lb.LookupOrGenerate(ctx)
+		g.Expect(err).ToNot(HaveOccurred())
+		g.Expect(svc1).ToNot(BeNil())
+
+		g.Expect(svc1.Name).To(Equal(fmt.Sprintf("%s-lb", lb.controlPlaneEndpoint.Name)))
+		g.Expect(svc1.Namespace).To(Equal(lb.controlPlaneEndpoint.Namespace))
+		g.Expect(svc1.OwnerReferences).To(HaveLen(1))
+		g.Expect(svc1.Spec.Type).To(Equal(corev1.ServiceTypeClusterIP))
+		g.Expect(svc1.Spec.Ports).To(ConsistOf(corev1.ServicePort{Port: lbServicePort, TargetPort: intstr.FromInt(apiServerPodPort)}))
+
+		// Fake ClusterIP address being assigned
+		patch := client.MergeFrom(svc1.DeepCopy())
+		svc1.Spec.ClusterIP = "1.2.3.4"
+		g.Expect(lb.client.Patch(ctx, svc1, patch)).To(Succeed())
+
+		// LookupOrGenerate must read the service if it is already there
+		svc2, err := lb.LookupOrGenerate(ctx)
+		g.Expect(err).ToNot(HaveOccurred())
+		g.Expect(svc2).ToNot(BeNil())
+
+		g.Expect(svc2.Spec.ClusterIP).To(Equal("1.2.3.4"))
+	})
+}
diff --git a/test/infrastructure/vcsim/controllers/backends/kubernetes/controlplaneendpoint.go b/test/infrastructure/vcsim/controllers/backends/kubernetes/controlplaneendpoint.go
new file mode 100644
index 0000000000..db61e70f53
--- /dev/null
+++ b/test/infrastructure/vcsim/controllers/backends/kubernetes/controlplaneendpoint.go
@@ -0,0 +1,90 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kubernetes
+
+import (
+	"context"
+
+	"github.com/pkg/errors"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
+
+	vcsimv1 "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/vcsim/api/v1alpha1"
+)
+
+type ControlPlaneEndpointReconciler struct {
+	Client client.Client
+}
+
+func (r *ControlPlaneEndpointReconciler) ReconcileNormal(ctx context.Context, controlPlaneEndpoint *vcsimv1.ControlPlaneEndpoint) (ctrl.Result, error) {
+	log := ctrl.LoggerFrom(ctx)
+
+	// If the controlPlaneEndpoint is already set then we are done.
+	// Note: the controlPlaneEndpoint doesn't have the capability to recover from the manual deletion of the service,
+	// but this is considered acceptable for vcsim testing.
+	if controlPlaneEndpoint.Status.Host != "" {
+		return ctrl.Result{}, nil
+	}
+
+	// Get the load balancer service.
+	log.Info("Creating the Kubernetes Service acting as a cluster load balancer")
+	s := lbServiceHandler{
+		client:               r.Client,
+		controlPlaneEndpoint: controlPlaneEndpoint,
+	}
+
+	svc, err := s.LookupOrGenerate(ctx)
+	if err != nil {
+		return ctrl.Result{}, err
+	}
+
+	// Wait for the cluster IP to show up.
+	if svc.Spec.ClusterIP == "" {
+		return ctrl.Result{Requeue: true}, nil
+	}
+
+	// If the service ports are not as expected, delete the service on a best-effort basis.
+	// Note: this should never happen (it could only happen if someone changes the service while it is being created or immediately after).
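+	// The generated service exposes a single port mapping lbServicePort to apiServerPodPort, so any other shape
+	// indicates the object was changed out of band.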
+ if len(svc.Spec.Ports) != 1 { + _ = s.Delete(ctx) + return ctrl.Result{}, errors.Errorf("service doesn't have the expected port") + } + + controlPlaneEndpoint.Status.Host = svc.Spec.ClusterIP + controlPlaneEndpoint.Status.Port = int32(svc.Spec.Ports[0].Port) + return ctrl.Result{}, nil +} + +func (r *ControlPlaneEndpointReconciler) ReconcileDelete(ctx context.Context, controlPlaneEndpoint *vcsimv1.ControlPlaneEndpoint) (ctrl.Result, error) { + log := ctrl.LoggerFrom(ctx) + + log.Info("Deleting the Kubernetes Service acting as a load balancer in front of all the control plane instances") + s := lbServiceHandler{ + client: r.Client, + controlPlaneEndpoint: controlPlaneEndpoint, + } + + if err := s.Delete(ctx); err != nil { + return ctrl.Result{}, err + } + log.Info("Cluster infrastructure deleted") + + controllerutil.RemoveFinalizer(controlPlaneEndpoint, vcsimv1.ControlPlaneEndpointFinalizer) + + return ctrl.Result{}, nil +} diff --git a/test/infrastructure/vcsim/controllers/backends/kubernetes/doc.go b/test/infrastructure/vcsim/controllers/backends/kubernetes/doc.go new file mode 100644 index 0000000000..59e49bb80f --- /dev/null +++ b/test/infrastructure/vcsim/controllers/backends/kubernetes/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package kubernetes implements backend for ControlPlaneEndpoint and VirtualMachines implemented using Kubernetes constructs like +// services and pods. +package kubernetes diff --git a/test/infrastructure/vcsim/controllers/backends/kubernetes/virtualmachine.go b/test/infrastructure/vcsim/controllers/backends/kubernetes/virtualmachine.go new file mode 100644 index 0000000000..9fa20a77eb --- /dev/null +++ b/test/infrastructure/vcsim/controllers/backends/kubernetes/virtualmachine.go @@ -0,0 +1,214 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package kubernetes
+
+import (
+	"context"
+	"time"
+
+	"github.com/pkg/errors"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	kerrors "k8s.io/apimachinery/pkg/util/errors"
+	"k8s.io/apimachinery/pkg/util/wait"
+	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
+	capiutil "sigs.k8s.io/cluster-api/util"
+	v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
+	"sigs.k8s.io/cluster-api/util/secret"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+
+	"sigs.k8s.io/cluster-api-provider-vsphere/pkg/util"
+	vcsimv1 "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/vcsim/api/v1alpha1"
+)
+
+type VirtualMachineReconciler struct {
+	Client client.Client
+
+	IsVMReady func() bool
+}
+
+func (r *VirtualMachineReconciler) ReconcileNormal(ctx context.Context, cluster *clusterv1beta1.Cluster, machine *clusterv1beta1.Machine, virtualMachine client.Object) (_ ctrl.Result, reterr error) {
+	log := ctrl.LoggerFrom(ctx)
+
+	// Make sure bootstrap data is available and populated.
+	// NOTE: we are not using bootstrap data, but we wait for it in order to simulate a real machine provisioning workflow.
+	if machine.Spec.Bootstrap.DataSecretName == nil {
+		if !util.IsControlPlaneMachine(machine) && !v1beta1conditions.IsTrue(cluster, clusterv1beta1.ControlPlaneInitializedCondition) {
+			log.Info("Waiting for the control plane to be initialized")
+			return reconcile.Result{RequeueAfter: 5 * time.Second}, nil // keep requeueing since we don't have a watch on machines // TODO: check if we can avoid this
+		}
+
+		log.Info("Waiting for the Bootstrap provider controller to set bootstrap data")
+		return reconcile.Result{RequeueAfter: 5 * time.Second}, nil // keep requeueing since we don't have a watch on machines // TODO: check if we can avoid this
+	}
+
+	// Check if the infrastructure is ready and the BIOS UUID is set (required for computing the Provider ID); otherwise return and wait for the vsphereVM object to be updated.
+	if !r.IsVMReady() {
+		log.Info("Waiting for machine infrastructure to become ready")
+		return reconcile.Result{}, nil // TODO: check if we can avoid this
+	}
+
+	// Call the inner reconciliation methods.
+	phases := []func(ctx context.Context, cluster *clusterv1beta1.Cluster, machine *clusterv1beta1.Machine, virtualMachine client.Object) (ctrl.Result, error){
+		r.reconcileCertificates,
+		r.reconcileKubeConfig,
+		r.reconcilePods,
+	}
+
+	res := ctrl.Result{}
+	errs := make([]error, 0)
+	for _, phase := range phases {
+		phaseResult, err := phase(ctx, cluster, machine, virtualMachine)
+		if err != nil {
+			errs = append(errs, err)
+		}
+		if len(errs) > 0 {
+			continue
+		}
+		res = capiutil.LowestNonZeroResult(res, phaseResult)
+	}
+	return res, kerrors.NewAggregate(errs)
+}
+
+// reconcileCertificates reconciles the cluster certificates in the management cluster, as required by the CAPI contract.
+// TODO: change the implementation so we have logs when creating, and so we fail if certificates are missing after the control plane has been generated.
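+// Note: certificates are created via the caSecretHandler, which is expected to generate the cluster's CA secrets
+// in the management cluster when they do not exist yet.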
+func (r *VirtualMachineReconciler) reconcileCertificates(ctx context.Context, cluster *clusterv1beta1.Cluster, machine *clusterv1beta1.Machine, virtualMachine client.Object) (ctrl.Result, error) {
+	log := ctrl.LoggerFrom(ctx)
+	log.Info("DEBUG: reconcileCertificates")
+
+	secretHandler := caSecretHandler{
+		client:            r.Client,
+		cluster:           cluster,
+		virtualMachine:    virtualMachine,
+		virtualMachineGVK: virtualMachine.GetObjectKind().GroupVersionKind(), // FIXME: gvk is not always set, infer it from schema.
+	}
+
+	if err := secretHandler.LookupOrGenerate(ctx); err != nil {
+		return ctrl.Result{}, errors.Wrap(err, "failed to generate cluster's certificate authorities")
+	}
+	return ctrl.Result{}, nil
+}
+
+// reconcileKubeConfig reconciles the cluster admin kubeconfig in the management cluster, as required by the CAPI contract.
+// TODO: change the implementation so we have logs when creating.
+func (r *VirtualMachineReconciler) reconcileKubeConfig(ctx context.Context, cluster *clusterv1beta1.Cluster, machine *clusterv1beta1.Machine, virtualMachine client.Object) (ctrl.Result, error) {
+	log := ctrl.LoggerFrom(ctx)
+	log.Info("DEBUG: reconcileKubeConfig")
+	// If the secret with the CA is not yet in the cache, wait for a bit before giving up.
+	if err := wait.PollUntilContextTimeout(ctx, 250*time.Millisecond, 5*time.Second, true, func(ctx context.Context) (bool, error) {
+		if _, err := secret.GetFromNamespacedName(ctx, r.Client, client.ObjectKeyFromObject(cluster), secret.ClusterCA); err != nil {
+			if apierrors.IsNotFound(err) {
+				return false, nil
+			}
+			return false, err
+		}
+		return true, nil
+	}); err != nil {
+		return ctrl.Result{}, errors.Wrap(err, "failed to read cluster CA while generating admin kubeconfig")
+	}
+
+	secretHandler := kubeConfigSecretHandler{
+		client:            r.Client,
+		cluster:           cluster,
+		virtualMachine:    virtualMachine,
+		virtualMachineGVK: virtualMachine.GetObjectKind().GroupVersionKind(), // FIXME: gvk is not always set, infer it from schema.
+	}
+
+	// Note: the vcsim control plane doesn't implement kubeconfig client certificate renewal,
+	// but this is considered acceptable for the goals of the vcsim provider.
+	if err := secretHandler.LookupOrGenerate(ctx); err != nil {
+		return ctrl.Result{}, errors.Wrap(err, "failed to generate secret with the cluster's admin kubeconfig")
+	}
+	return ctrl.Result{}, nil
+}
+
+// reconcilePods reconciles the pods hosting control plane replicas.
+// Note: The implementation currently manages one replica without remediation support, but part of the
+// scaffolding for supporting n replicas is already in place.
+// TODO: implement support for n replicas and remediation.
+func (r *VirtualMachineReconciler) reconcilePods(ctx context.Context, cluster *clusterv1beta1.Cluster, machine *clusterv1beta1.Machine, virtualMachine client.Object) (ctrl.Result, error) {
+	log := ctrl.LoggerFrom(ctx)
+	log.Info("DEBUG: reconcilePods")
+
+	podHandler := controlPlanePodHandler{
+		client:               r.Client,
+		cluster:              cluster,
+		controlPlaneEndpoint: nil, // FIXME: fetch the controlPlaneEndpoint
+		virtualMachine:       virtualMachine,
+		virtualMachineGVK:    virtualMachine.GetObjectKind().GroupVersionKind(), // FIXME: gvk is not always set, infer it from schema.
+	}
+
+	// Create RBAC rules for the pod to run.
+	if err := podHandler.LookupAndGenerateRBAC(ctx); err != nil {
+		return ctrl.Result{}, err
+	}
+
+	// Get the list of pods hosting control plane replicas.
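+	// If no control plane pod exists yet, create one; otherwise wait for the existing pod to become running.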
+	pods, err := podHandler.GetPods(ctx)
+	if err != nil {
+		return ctrl.Result{}, err
+	}
+
+	if len(pods.Items) < 1 {
+		log.Info("Scaling up control plane replicas to 1")
+		if err := podHandler.Generate(ctx, *machine.Spec.Version); err != nil {
+			return ctrl.Result{}, errors.Wrap(err, "failed to generate control plane pod")
+		}
+		// Requeue so we can refresh the list of pods hosting control plane replicas.
+		return ctrl.Result{Requeue: true}, nil
+	}
+
+	// Wait for the pod to become running.
+	log.Info("Waiting for control plane pods to become running")
+	// TODO: watch for CP pods in the backing cluster and drop requeueAfter
+	return ctrl.Result{RequeueAfter: 5 * time.Second}, nil
+}
+
+func (r *VirtualMachineReconciler) ReconcileDelete(ctx context.Context, cluster *clusterv1beta1.Cluster, machine *clusterv1beta1.Machine, virtualMachine client.Object) (_ ctrl.Result, reterr error) {
+	podHandler := controlPlanePodHandler{
+		client:               r.Client,
+		cluster:              cluster,
+		controlPlaneEndpoint: nil, // FIXME: fetch the controlPlaneEndpoint
+		virtualMachine:       virtualMachine,
+		virtualMachineGVK:    virtualMachine.GetObjectKind().GroupVersionKind(), // FIXME: gvk is not always set, infer it from schema.
+	}
+
+	// Delete all pods.
+	pods, err := podHandler.GetPods(ctx)
+	if err != nil {
+		return ctrl.Result{}, err
+	}
+
+	for _, pod := range pods.Items {
+		if err := podHandler.Delete(ctx, pod.Name); err != nil {
+			if !apierrors.IsNotFound(err) {
+				return ctrl.Result{}, errors.Wrap(err, "failed to delete control plane pod")
+			}
+		}
+	}
+
+	// TODO: Cleanup RBAC (it might be that those objects should be scoped/named per Cluster).
+
+	// TODO: Delete kubeconfig? It should go away via the garbage collector...
+
+	// TODO: Delete all secrets? They should go away via the garbage collector...
+
+	controllerutil.RemoveFinalizer(virtualMachine, vcsimv1.VMFinalizer)
+	return ctrl.Result{}, nil
+}
diff --git a/test/infrastructure/vcsim/controllers/backends/kubernetes/worker_pod.go b/test/infrastructure/vcsim/controllers/backends/kubernetes/worker_pod.go
new file mode 100644
index 0000000000..498b6fec4f
--- /dev/null
+++ b/test/infrastructure/vcsim/controllers/backends/kubernetes/worker_pod.go
@@ -0,0 +1,44 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kubernetes
+
+import (
+	"context"
+)
+
+// workerPodHandler implements handling for the Pod hosting a minimal Kubernetes worker.
+type workerPodHandler struct {
+	// TODO: implement using kubemark or virtual kubelet.
+	// kubemark seems the best fit;
+	// virtual kubelet with the mock provider seems a possible alternative, but I don't know if the mock provider has limitations that might limit usage.
+ // virtual kubelet with other providers seems overkill in this phase +} + +func (p *workerPodHandler) LookupAndGenerateRBAC(ctx context.Context) error { + + return nil +} + +func (p *workerPodHandler) Generate(ctx context.Context, kubernetesVersion string) error { + + return nil +} + +func (p *workerPodHandler) Delete(ctx context.Context, podName string) error { + + return nil +} diff --git a/test/infrastructure/vcsim/controllers/backends/kubernetes/worker_pod_test.go b/test/infrastructure/vcsim/controllers/backends/kubernetes/worker_pod_test.go new file mode 100644 index 0000000000..1c2a30aa38 --- /dev/null +++ b/test/infrastructure/vcsim/controllers/backends/kubernetes/worker_pod_test.go @@ -0,0 +1,23 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubernetes + +import "testing" + +func TestWorkerPodHandler(t *testing.T) { + // TODO +} diff --git a/test/infrastructure/vcsim/controllers/backends/virtualmachine.go b/test/infrastructure/vcsim/controllers/backends/virtualmachine.go new file mode 100644 index 0000000000..77c84ce35a --- /dev/null +++ b/test/infrastructure/vcsim/controllers/backends/virtualmachine.go @@ -0,0 +1,30 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package backends + +import ( + "context" + + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type VirtualMachineReconciler interface { + ReconcileNormal(ctx context.Context, cluster *clusterv1beta1.Cluster, machine *clusterv1beta1.Machine, virtualMachine client.Object) (ctrl.Result, error) + ReconcileDelete(ctx context.Context, cluster *clusterv1beta1.Cluster, machine *clusterv1beta1.Machine, virtualMachine client.Object) (ctrl.Result, error) +} diff --git a/test/infrastructure/vcsim/controllers/controlplaneendpoint_controller.go b/test/infrastructure/vcsim/controllers/controlplaneendpoint_controller.go index c7c04b4acb..0381781a0f 100644 --- a/test/infrastructure/vcsim/controllers/controlplaneendpoint_controller.go +++ b/test/infrastructure/vcsim/controllers/controlplaneendpoint_controller.go @@ -22,7 +22,6 @@ import ( "github.com/pkg/errors" apierrors "k8s.io/apimachinery/pkg/api/errors" kerrors "k8s.io/apimachinery/pkg/util/errors" - "k8s.io/klog/v2" inmemoryruntime "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/pkg/runtime" inmemoryserver "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/pkg/server" "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" @@ -31,9 +30,10 @@ import ( ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" vcsimv1 "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/vcsim/api/v1alpha1" + "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/vcsim/controllers/backends" + inmemorybackend "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/vcsim/controllers/backends/inmemory" ) type ControlPlaneEndpointReconciler struct { @@ -78,52 +78,23 @@ func (r *ControlPlaneEndpointReconciler) Reconcile(ctx context.Context, req ctrl } }() + backendReconciler := r.backendReconcilerFactory(ctx, controlPlaneEndpoint) + // Handle deleted machines if !controlPlaneEndpoint.DeletionTimestamp.IsZero() { - return ctrl.Result{}, r.reconcileDelete(ctx, controlPlaneEndpoint) + return backendReconciler.ReconcileDelete(ctx, controlPlaneEndpoint) } // Handle non-deleted machines - return ctrl.Result{}, r.reconcileNormal(ctx, controlPlaneEndpoint) + return backendReconciler.ReconcileNormal(ctx, controlPlaneEndpoint) } -func (r *ControlPlaneEndpointReconciler) reconcileNormal(ctx context.Context, controlPlaneEndpoint *vcsimv1.ControlPlaneEndpoint) error { - log := ctrl.LoggerFrom(ctx) - log.Info("Reconciling VCSim ControlPlaneEndpoint") - - // Initialize a listener for the workload cluster. - // IMPORTANT: The fact that both the listener and the resourceGroup for a workload cluster have - // the same name is used as assumptions in other part of the implementation. 
- listenerName := klog.KObj(controlPlaneEndpoint).String() - listener, err := r.APIServerMux.InitWorkloadClusterListener(listenerName) - if err != nil { - return errors.Wrapf(err, "failed to init the listener for the control plane endpoint") +func (r *ControlPlaneEndpointReconciler) backendReconcilerFactory(_ context.Context, _ *vcsimv1.ControlPlaneEndpoint) backends.ControlPlaneEndpointReconciler { + return &inmemorybackend.ControlPlaneEndpointReconciler{ + InMemoryManager: r.InMemoryManager, + APIServerMux: r.APIServerMux, + PodIP: r.PodIP, } - - controlPlaneEndpoint.Status.Host = r.PodIP // NOTE: we are replacing the listener ip with the pod ip so it will be accessible from other pods as well - controlPlaneEndpoint.Status.Port = listener.Port() - - return nil -} - -func (r *ControlPlaneEndpointReconciler) reconcileDelete(ctx context.Context, controlPlaneEndpoint *vcsimv1.ControlPlaneEndpoint) error { - log := ctrl.LoggerFrom(ctx) - log.Info("Reconciling delete VCSim ControlPlaneEndpoint") - listenerName := klog.KObj(controlPlaneEndpoint).String() - - // Delete the resource group hosting all the cloud resources belonging the workload cluster; - if resourceGroup, err := r.APIServerMux.ResourceGroupByWorkloadCluster(listenerName); err == nil { - r.InMemoryManager.DeleteResourceGroup(resourceGroup) - } - - // Delete the listener for the workload cluster; - if err := r.APIServerMux.DeleteWorkloadClusterListener(listenerName); err != nil { - return errors.Wrapf(err, "failed to delete the listener for the control plane endpoint") - } - - controllerutil.RemoveFinalizer(controlPlaneEndpoint, vcsimv1.ControlPlaneEndpointFinalizer) - - return nil } // SetupWithManager will add watches for this controller. diff --git a/test/infrastructure/vcsim/controllers/virtualmachine_controller.go b/test/infrastructure/vcsim/controllers/virtualmachine_controller.go index a2e4ff0ba0..1371e4da9d 100644 --- a/test/infrastructure/vcsim/controllers/virtualmachine_controller.go +++ b/test/infrastructure/vcsim/controllers/virtualmachine_controller.go @@ -23,9 +23,7 @@ import ( "github.com/pkg/errors" vmoprv1 "github.com/vmware-tanzu/vm-operator/api/v1alpha2" - corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/klog/v2" clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" @@ -37,16 +35,16 @@ import ( ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/reconcile" - infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1" vmwarev1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/vmware/v1beta1" vcsimhelpers "sigs.k8s.io/cluster-api-provider-vsphere/internal/test/helpers/vcsim" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/session" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/util" "sigs.k8s.io/cluster-api-provider-vsphere/test/framework/vmoperator" vcsimv1 "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/vcsim/api/v1alpha1" + "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/vcsim/controllers/backends" + inmemorybackend "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/vcsim/controllers/backends/inmemory" ) type VirtualMachineReconciler struct { @@ -140,83 +138,6 @@ func (r *VirtualMachineReconciler) Reconcile(ctx context.Context, req ctrl.Reque log = 
log.WithValues("VSphereCluster", klog.KObj(vsphereCluster)) ctx = ctrl.LoggerInto(ctx, log) - // Compute the resource group unique name. - resourceGroup := klog.KObj(cluster).String() - r.InMemoryManager.AddResourceGroup(resourceGroup) - - inmemoryClient := r.InMemoryManager.GetResourceGroup(resourceGroup).GetClient() - - // Create default Namespaces. - for _, nsName := range []string{metav1.NamespaceDefault, metav1.NamespacePublic, metav1.NamespaceSystem} { - ns := &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: nsName, - Labels: map[string]string{ - "kubernetes.io/metadata.name": nsName, - }, - }, - } - - if err := inmemoryClient.Get(ctx, client.ObjectKeyFromObject(ns), ns); err != nil { - if !apierrors.IsNotFound(err) { - return ctrl.Result{}, errors.Wrapf(err, "failed to get %s Namespace", nsName) - } - - if err := inmemoryClient.Create(ctx, ns); err != nil && !apierrors.IsAlreadyExists(err) { - return ctrl.Result{}, errors.Wrapf(err, "failed to create %s Namespace", nsName) - } - } - } - - if _, err := r.APIServerMux.WorkloadClusterByResourceGroup(resourceGroup); err != nil { - l := &vcsimv1.ControlPlaneEndpointList{} - if err := r.Client.List(ctx, l); err != nil { - return ctrl.Result{}, err - } - found := false - for _, c := range l.Items { - c := c - if c.Status.Host != cluster.Spec.ControlPlaneEndpoint.Host || c.Status.Port != cluster.Spec.ControlPlaneEndpoint.Port { - continue - } - - listenerName := klog.KObj(&c).String() - log.Info("Registering ResourceGroup for ControlPlaneEndpoint", "ResourceGroup", resourceGroup, "ControlPlaneEndpoint", listenerName) - err := r.APIServerMux.RegisterResourceGroup(listenerName, resourceGroup) - if err != nil { - return ctrl.Result{}, err - } - found = true - break - } - if !found { - return ctrl.Result{}, errors.Errorf("unable to find a ControlPlaneEndpoint for host %s, port %d", cluster.Spec.ControlPlaneEndpoint.Host, cluster.Spec.ControlPlaneEndpoint.Port) - } - } - - // Check if there is a conditionsTracker in the resource group. - // The conditionsTracker is an object stored in memory with the scope of storing conditions used for keeping - // track of the provisioning process of the fake node, etcd, api server, etc for this specific virtualMachine. - // (the process managed by this controller). - // NOTE: The type of the in memory conditionsTracker object doesn't matter as soon as it implements Cluster API's conditions interfaces. - // Unfortunately vmoprv1.VirtualMachine isn't a condition getter, so we fallback on using a infrav1.VSphereVM. 
- conditionsTracker := &infrav1.VSphereVM{} - if err := inmemoryClient.Get(ctx, client.ObjectKeyFromObject(virtualMachine), conditionsTracker); err != nil { - if !apierrors.IsNotFound(err) { - return ctrl.Result{}, errors.Wrap(err, "failed to get conditionsTracker") - } - - conditionsTracker = &infrav1.VSphereVM{ - ObjectMeta: metav1.ObjectMeta{ - Name: virtualMachine.Name, - Namespace: virtualMachine.Namespace, - }, - } - if err := inmemoryClient.Create(ctx, conditionsTracker); err != nil { - return ctrl.Result{}, errors.Wrap(err, "failed to create conditionsTracker") - } - } - // Initialize the patch helper patchHelper, err := patch.NewHelper(virtualMachine, r.Client) if err != nil { @@ -229,91 +150,48 @@ func (r *VirtualMachineReconciler) Reconcile(ctx context.Context, req ctrl.Reque if err := patchHelper.Patch(ctx, virtualMachine); err != nil { reterr = kerrors.NewAggregate([]error{reterr, err}) } - - // NOTE: Patch on conditionsTracker will only track of provisioning process of the fake node, etcd, api server, etc. - if err := inmemoryClient.Update(ctx, conditionsTracker); err != nil { - reterr = kerrors.NewAggregate([]error{reterr, err}) - } }() + backendReconciler := r.backendReconcilerFactory(ctx, cluster, machine, virtualMachine) + // Handle deleted machines if !vSphereMachine.DeletionTimestamp.IsZero() { - return r.reconcileDelete(ctx, cluster, machine, virtualMachine, conditionsTracker) + return backendReconciler.ReconcileDelete(ctx, cluster, machine, virtualMachine) } // Handle non-deleted machines - return r.reconcileNormal(ctx, cluster, machine, virtualMachine, conditionsTracker) + return backendReconciler.ReconcileNormal(ctx, cluster, machine, virtualMachine) } -func (r *VirtualMachineReconciler) reconcileNormal(ctx context.Context, cluster *clusterv1beta1.Cluster, machine *clusterv1beta1.Machine, virtualMachine *vmoprv1.VirtualMachine, conditionsTracker *infrav1.VSphereVM) (ctrl.Result, error) { - ipReconciler := r.getVMIpReconciler(cluster, virtualMachine) - if ret, err := ipReconciler.ReconcileIP(ctx); !ret.IsZero() || err != nil { - return ret, err - } - - bootstrapReconciler := r.getVMBootstrapReconciler(virtualMachine) - if ret, err := bootstrapReconciler.reconcileBoostrap(ctx, cluster, machine, conditionsTracker); !ret.IsZero() || err != nil { - return ret, err - } - - return ctrl.Result{}, nil -} - -func (r *VirtualMachineReconciler) reconcileDelete(ctx context.Context, cluster *clusterv1beta1.Cluster, machine *clusterv1beta1.Machine, virtualMachine *vmoprv1.VirtualMachine, conditionsTracker *infrav1.VSphereVM) (ctrl.Result, error) { - bootstrapReconciler := r.getVMBootstrapReconciler(virtualMachine) - if ret, err := bootstrapReconciler.reconcileDelete(ctx, cluster, machine, conditionsTracker); !ret.IsZero() || err != nil { - return ret, err - } - - controllerutil.RemoveFinalizer(virtualMachine, vcsimv1.VMFinalizer) - return ctrl.Result{}, nil -} - -func (r *VirtualMachineReconciler) getVMIpReconciler(cluster *clusterv1beta1.Cluster, virtualMachine *vmoprv1.VirtualMachine) *vmIPReconciler { - return &vmIPReconciler{ - Client: r.Client, - - // Type specific functions; those functions wraps the differences between govmomi and supervisor types, - // thus allowing to use the same vmIPReconciler in both scenarios. 
+func (r *VirtualMachineReconciler) backendReconcilerFactory(_ context.Context, cluster *clusterv1beta1.Cluster, machine *clusterv1beta1.Machine, virtualMachine *vmoprv1.VirtualMachine) backends.VirtualMachineReconciler { + return &inmemorybackend.VirtualMachineReconciler{ + Client: r.Client, + InMemoryManager: r.InMemoryManager, + APIServerMux: r.APIServerMux, + GetProviderID: func() string { + // Computes the ProviderID for the node hosted on the virtualMachine + return util.ConvertUUIDToProviderID(virtualMachine.Status.BiosUUID) + }, GetVCenterSession: func(ctx context.Context) (*session.Session, error) { // Return a connection to the vCenter where the virtualMachine is hosted - return r.getVCenterSession(ctx) - }, - IsVMWaitingforIP: func() bool { - // A virtualMachine is waiting for an IP when PoweredOn but without an Ip. - return virtualMachine.Status.PowerState == vmoprv1.VirtualMachinePowerStateOn && (virtualMachine.Status.Network == nil || (virtualMachine.Status.Network.PrimaryIP4 == "" && virtualMachine.Status.Network.PrimaryIP6 == "")) + return vmoperator.GetVCenterSession(ctx, r.Client) }, GetVMPath: func() string { // The vm operator always create VMs under a sub-folder with named like the cluster. datacenter := 0 return vcsimhelpers.VMPath(datacenter, path.Join(cluster.Name, virtualMachine.Name)) }, - } -} - -func (r *VirtualMachineReconciler) getVMBootstrapReconciler(virtualMachine *vmoprv1.VirtualMachine) *vmBootstrapReconciler { - return &vmBootstrapReconciler{ - Client: r.Client, - InMemoryManager: r.InMemoryManager, - APIServerMux: r.APIServerMux, - - // Type specific functions; those functions wraps the differences between govmomi and supervisor types, - // thus allowing to use the same vmBootstrapReconciler in both scenarios. IsVMReady: func() bool { // A virtualMachine is ready to provision fake objects hosted on it when PoweredOn, with a primary Ip assigned and BiosUUID is set (bios id is required when provisioning the node to compute the Provider ID). return virtualMachine.Status.PowerState == vmoprv1.VirtualMachinePowerStateOn && virtualMachine.Status.Network != nil && (virtualMachine.Status.Network.PrimaryIP4 != "" || virtualMachine.Status.Network.PrimaryIP6 != "") && virtualMachine.Status.BiosUUID != "" }, - GetProviderID: func() string { - // Computes the ProviderID for the node hosted on the virtualMachine - return util.ConvertUUIDToProviderID(virtualMachine.Status.BiosUUID) + IsVMWaitingforIP: func() bool { + // A virtualMachine is waiting for an IP when PoweredOn but without an Ip. + return virtualMachine.Status.PowerState == vmoprv1.VirtualMachinePowerStateOn && (virtualMachine.Status.Network == nil || (virtualMachine.Status.Network.PrimaryIP4 == "" && virtualMachine.Status.Network.PrimaryIP6 == "")) }, } } -func (r *VirtualMachineReconciler) getVCenterSession(ctx context.Context) (*session.Session, error) { - return vmoperator.GetVCenterSession(ctx, r.Client) -} - // SetupWithManager will add watches for this controller. func (r *VirtualMachineReconciler) SetupWithManager(_ context.Context, mgr ctrl.Manager, options controller.Options) error { err := ctrl.NewControllerManagedBy(mgr). 
diff --git a/test/infrastructure/vcsim/controllers/virtualmachine_controller_test.go b/test/infrastructure/vcsim/controllers/virtualmachine_controller_test.go index f998e0e1fa..abfe07b35d 100644 --- a/test/infrastructure/vcsim/controllers/virtualmachine_controller_test.go +++ b/test/infrastructure/vcsim/controllers/virtualmachine_controller_test.go @@ -39,6 +39,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1" vmwarev1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/vmware/v1beta1" vcsimv1 "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/vcsim/api/v1alpha1" + inmemorybackend "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/vcsim/controllers/backends/inmemory" ) func Test_Reconcile_VirtualMachine(t *testing.T) { @@ -53,27 +54,27 @@ func Test_Reconcile_VirtualMachine(t *testing.T) { }, } - cluster := &clusterv1.Cluster{ + cluster := &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Namespace: "foo", Name: "bar", UID: "bar", }, - Spec: clusterv1.ClusterSpec{ - InfrastructureRef: clusterv1.ContractVersionedObjectReference{ - APIGroup: vmwarev1.GroupVersion.Group, - Kind: "VSphereCluster", - Name: vsphereCluster.Name, + Spec: clusterv1beta1.ClusterSpec{ + InfrastructureRef: &corev1.ObjectReference{ + APIVersion: vmwarev1.GroupVersion.String(), + Kind: "VSphereCluster", + Name: vsphereCluster.Name, }, }, } - machine := &clusterv1.Machine{ + machine := &clusterv1beta1.Machine{ ObjectMeta: metav1.ObjectMeta{ Namespace: "foo", Name: "bar", Labels: map[string]string{ - clusterv1.ClusterNameLabel: cluster.Name, + clusterv1beta1.ClusterNameLabel: cluster.Name, }, }, } @@ -158,10 +159,10 @@ func Test_Reconcile_VirtualMachine(t *testing.T) { err = inmemoryClient.Get(ctx, client.ObjectKeyFromObject(virtualMachine), conditionsTracker) g.Expect(err).ToNot(HaveOccurred()) - c := v1beta1conditions.Get(conditionsTracker, VMProvisionedCondition) + c := v1beta1conditions.Get(conditionsTracker, inmemorybackend.VMProvisionedCondition) g.Expect(c.Status).To(Equal(corev1.ConditionFalse)) g.Expect(c.Severity).To(Equal(clusterv1beta1.ConditionSeverityInfo)) - g.Expect(c.Reason).To(Equal(WaitingControlPlaneInitializedReason)) + g.Expect(c.Reason).To(Equal(inmemorybackend.WaitingControlPlaneInitializedReason)) }) t.Run("VirtualMachine provisioned gets a node (worker)", func(t *testing.T) { @@ -175,31 +176,31 @@ func Test_Reconcile_VirtualMachine(t *testing.T) { }, } - cluster := &clusterv1.Cluster{ + cluster := &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Namespace: "foo", Name: "bar", UID: "bar", }, - Spec: clusterv1.ClusterSpec{ - InfrastructureRef: clusterv1.ContractVersionedObjectReference{ - APIGroup: vmwarev1.GroupVersion.Group, - Kind: "VSphereCluster", - Name: vsphereCluster.Name, + Spec: clusterv1beta1.ClusterSpec{ + InfrastructureRef: &corev1.ObjectReference{ + APIVersion: vmwarev1.GroupVersion.String(), + Kind: "VSphereCluster", + Name: vsphereCluster.Name, }, }, } - machine := &clusterv1.Machine{ + machine := &clusterv1beta1.Machine{ ObjectMeta: metav1.ObjectMeta{ Namespace: "foo", Name: "bar", Labels: map[string]string{ - clusterv1.ClusterNameLabel: cluster.Name, + clusterv1beta1.ClusterNameLabel: cluster.Name, }, }, - Spec: clusterv1.MachineSpec{ - Bootstrap: clusterv1.Bootstrap{ + Spec: clusterv1beta1.MachineSpec{ + Bootstrap: clusterv1beta1.Bootstrap{ DataSecretName: ptr.To("foo"), // this unblocks node provisioning }, }, @@ -281,7 +282,7 @@ func Test_Reconcile_VirtualMachine(t *testing.T) { } // Reconcile - nodeStartupDuration = 0 * 
time.Second + inmemorybackend.NodeStartupDuration = 0 * time.Second res, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{ Namespace: virtualMachine.Namespace, @@ -296,7 +297,7 @@ func Test_Reconcile_VirtualMachine(t *testing.T) { err = inmemoryClient.Get(ctx, client.ObjectKeyFromObject(virtualMachine), conditionsTracker) g.Expect(err).ToNot(HaveOccurred()) - c := v1beta1conditions.Get(conditionsTracker, NodeProvisionedCondition) + c := v1beta1conditions.Get(conditionsTracker, inmemorybackend.NodeProvisionedCondition) g.Expect(c.Status).To(Equal(corev1.ConditionTrue)) }) } diff --git a/test/infrastructure/vcsim/controllers/vspherevm_controller.go b/test/infrastructure/vcsim/controllers/vspherevm_controller.go index fee9d6fdb9..d805bf3d0a 100644 --- a/test/infrastructure/vcsim/controllers/vspherevm_controller.go +++ b/test/infrastructure/vcsim/controllers/vspherevm_controller.go @@ -21,7 +21,6 @@ import ( "fmt" "github.com/pkg/errors" - corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" @@ -39,7 +38,6 @@ import ( ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/reconcile" infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1" @@ -47,6 +45,8 @@ import ( "sigs.k8s.io/cluster-api-provider-vsphere/pkg/session" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/util" vcsimv1 "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/vcsim/api/v1alpha1" + "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/vcsim/controllers/backends" + inmemorybackend "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/vcsim/controllers/backends/inmemory" ) // TODO: implement support for CAPV deployed in arbitrary ns (TBD if we need this). @@ -144,82 +144,6 @@ func (r *VSphereVMReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( log = log.WithValues("VSphereCluster", klog.KObj(vSphereCluster)) ctx = ctrl.LoggerInto(ctx, log) - // Compute the resource group unique name. - resourceGroup := klog.KObj(cluster).String() - r.InMemoryManager.AddResourceGroup(resourceGroup) - - inmemoryClient := r.InMemoryManager.GetResourceGroup(resourceGroup).GetClient() - - // Create default Namespaces. 
- for _, nsName := range []string{metav1.NamespaceDefault, metav1.NamespacePublic, metav1.NamespaceSystem} { - ns := &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: nsName, - Labels: map[string]string{ - "kubernetes.io/metadata.name": nsName, - }, - }, - } - - if err := inmemoryClient.Get(ctx, client.ObjectKeyFromObject(ns), ns); err != nil { - if !apierrors.IsNotFound(err) { - return ctrl.Result{}, errors.Wrapf(err, "failed to get %s Namespace", nsName) - } - - if err := inmemoryClient.Create(ctx, ns); err != nil && !apierrors.IsAlreadyExists(err) { - return ctrl.Result{}, errors.Wrapf(err, "failed to create %s Namespace", nsName) - } - } - } - - if _, err := r.APIServerMux.WorkloadClusterByResourceGroup(resourceGroup); err != nil { - l := &vcsimv1.ControlPlaneEndpointList{} - if err := r.Client.List(ctx, l); err != nil { - return ctrl.Result{}, err - } - found := false - for _, c := range l.Items { - c := c - if c.Status.Host != cluster.Spec.ControlPlaneEndpoint.Host || c.Status.Port != cluster.Spec.ControlPlaneEndpoint.Port { - continue - } - - listenerName := klog.KObj(&c).String() - log.Info("Registering ResourceGroup for ControlPlaneEndpoint", "ResourceGroup", resourceGroup, "ControlPlaneEndpoint", listenerName) - err := r.APIServerMux.RegisterResourceGroup(listenerName, resourceGroup) - if err != nil { - return ctrl.Result{}, err - } - found = true - break - } - if !found { - return ctrl.Result{}, errors.Errorf("unable to find a ControlPlaneEndpoint for host %s, port %d", cluster.Spec.ControlPlaneEndpoint.Host, cluster.Spec.ControlPlaneEndpoint.Port) - } - } - - // Check if there is a conditionsTracker in the resource group. - // The conditionsTracker is an object stored in memory with the scope of storing conditions used for keeping - // track of the provisioning process of the fake node, etcd, api server, etc for this specific vSphereVM. - // (the process managed by this controller). - // NOTE: The type of the in memory conditionsTracker object doesn't matter as soon as it implements Cluster API's conditions interfaces. - conditionsTracker := &infrav1.VSphereVM{} - if err := inmemoryClient.Get(ctx, client.ObjectKeyFromObject(vSphereVM), conditionsTracker); err != nil { - if !apierrors.IsNotFound(err) { - return ctrl.Result{}, errors.Wrap(err, "failed to get conditionsTracker") - } - - conditionsTracker = &infrav1.VSphereVM{ - ObjectMeta: metav1.ObjectMeta{ - Name: vSphereVM.Name, - Namespace: vSphereVM.Namespace, - }, - } - if err := inmemoryClient.Create(ctx, conditionsTracker); err != nil { - return ctrl.Result{}, errors.Wrap(err, "failed to create conditionsTracker") - } - } - // Initialize the patch helper patchHelper, err := patch.NewHelper(vSphereVM, r.Client) if err != nil { @@ -232,82 +156,43 @@ func (r *VSphereVMReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( if err := patchHelper.Patch(ctx, vSphereVM); err != nil { reterr = kerrors.NewAggregate([]error{reterr, err}) } - - // NOTE: Patch on conditionsTracker will only track of provisioning process of the fake node, etcd, api server, etc. 
- if err := inmemoryClient.Update(ctx, conditionsTracker); err != nil { - reterr = kerrors.NewAggregate([]error{reterr, err}) - } }() + backendReconciler := r.backendReconcilerFactory(ctx, vSphereCluster, vSphereVM) + // Handle deleted machines if !vSphereMachine.DeletionTimestamp.IsZero() { - return r.reconcileDelete(ctx, cluster, vSphereCluster, machine, vSphereVM, conditionsTracker) + return backendReconciler.ReconcileDelete(ctx, cluster, machine, vSphereVM) } // Handle non-deleted machines - return r.reconcileNormal(ctx, cluster, vSphereCluster, machine, vSphereVM, conditionsTracker) -} - -func (r *VSphereVMReconciler) reconcileNormal(ctx context.Context, cluster *clusterv1beta1.Cluster, vSphereCluster *infrav1.VSphereCluster, machine *clusterv1beta1.Machine, vSphereVM *infrav1.VSphereVM, conditionsTracker *infrav1.VSphereVM) (ctrl.Result, error) { - ipReconciler := r.getVMIpReconciler(vSphereCluster, vSphereVM) - if ret, err := ipReconciler.ReconcileIP(ctx); !ret.IsZero() || err != nil { - return ret, err - } - - bootstrapReconciler := r.getVMBootstrapReconciler(vSphereVM) - if ret, err := bootstrapReconciler.reconcileBoostrap(ctx, cluster, machine, conditionsTracker); !ret.IsZero() || err != nil { - return ret, err - } - - return ctrl.Result{}, nil -} - -func (r *VSphereVMReconciler) reconcileDelete(ctx context.Context, cluster *clusterv1beta1.Cluster, _ *infrav1.VSphereCluster, machine *clusterv1beta1.Machine, vSphereVM *infrav1.VSphereVM, conditionsTracker *infrav1.VSphereVM) (ctrl.Result, error) { - bootstrapReconciler := r.getVMBootstrapReconciler(vSphereVM) - if ret, err := bootstrapReconciler.reconcileDelete(ctx, cluster, machine, conditionsTracker); !ret.IsZero() || err != nil { - return ret, err - } - - controllerutil.RemoveFinalizer(vSphereVM, vcsimv1.VMFinalizer) - return ctrl.Result{}, nil + return backendReconciler.ReconcileNormal(ctx, cluster, machine, vSphereVM) } -func (r *VSphereVMReconciler) getVMIpReconciler(vSphereCluster *infrav1.VSphereCluster, vSphereVM *infrav1.VSphereVM) *vmIPReconciler { - return &vmIPReconciler{ - Client: r.Client, - - // Type specific functions; those functions wraps the differences between govmomi and supervisor types, - // thus allowing to use the same vmIPReconciler in both scenarios. 
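// The vmIPReconciler/vmBootstrapReconciler wiring removed in this hunk is folded behind a
// single backend contract. A minimal sketch of what backends.VirtualMachineReconciler is
// expected to look like, inferred only from the ReconcileNormal/ReconcileDelete call sites
// in Reconcile above; the real definition lives in controllers/backends and may differ,
// for example by being generic over the VM object so the same contract can also serve the
// supervisor VirtualMachine controller:

package backends

import (
	"context"

	ctrl "sigs.k8s.io/controller-runtime"

	// Aliases as used in vspherevm_controller.go; the exact cluster-api import path
	// depends on the version pinned in go.mod.
	clusterv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1"

	infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1"
)

// VirtualMachineReconciler is the contract the frontend controller delegates to; the
// in-memory backend implements it, and other backends can satisfy the same interface.
type VirtualMachineReconciler interface {
	ReconcileNormal(ctx context.Context, cluster *clusterv1beta1.Cluster, machine *clusterv1beta1.Machine, vSphereVM *infrav1.VSphereVM) (ctrl.Result, error)
	ReconcileDelete(ctx context.Context, cluster *clusterv1beta1.Cluster, machine *clusterv1beta1.Machine, vSphereVM *infrav1.VSphereVM) (ctrl.Result, error)
}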
+func (r *VSphereVMReconciler) backendReconcilerFactory(_ context.Context, vSphereCluster *infrav1.VSphereCluster, vSphereVM *infrav1.VSphereVM) backends.VirtualMachineReconciler { + return &inmemorybackend.VirtualMachineReconciler{ + Client: r.Client, + InMemoryManager: r.InMemoryManager, + APIServerMux: r.APIServerMux, + GetProviderID: func() string { + // Computes the ProviderID for the node hosted on the vSphereVM + return util.ConvertUUIDToProviderID(vSphereVM.Spec.BiosUUID) + }, GetVCenterSession: func(ctx context.Context) (*session.Session, error) { // Return a connection to the vCenter where the vSphereVM is hosted return r.getVCenterSession(ctx, vSphereCluster, vSphereVM) }, - IsVMWaitingforIP: func() bool { - // A vSphereVM is waiting for an IP when not ready VMProvisioned condition is false with reason WaitingForIPAllocation - return !vSphereVM.Status.Ready && v1beta1conditions.IsFalse(vSphereVM, infrav1.VMProvisionedCondition) && v1beta1conditions.GetReason(vSphereVM, infrav1.VMProvisionedCondition) == infrav1.WaitingForIPAllocationReason - }, GetVMPath: func() string { // Return vmref of the VM as it is populated already by CAPV return vSphereVM.Status.VMRef }, - } -} - -func (r *VSphereVMReconciler) getVMBootstrapReconciler(vSphereVM *infrav1.VSphereVM) *vmBootstrapReconciler { - return &vmBootstrapReconciler{ - Client: r.Client, - InMemoryManager: r.InMemoryManager, - APIServerMux: r.APIServerMux, - - // Type specific functions; those functions wraps the differences between govmomi and supervisor types, - // thus allowing to use the same vmBootstrapReconciler in both scenarios. IsVMReady: func() bool { // A vSphereVM is ready to provision fake objects hosted on it when both ready and BiosUUID is set (bios id is required when provisioning the node to compute the Provider ID) return vSphereVM.Status.Ready && vSphereVM.Spec.BiosUUID != "" }, - GetProviderID: func() string { - // Computes the ProviderID for the node hosted on the vSphereVM - return util.ConvertUUIDToProviderID(vSphereVM.Spec.BiosUUID) + IsVMWaitingforIP: func() bool { + // A vSphereVM is waiting for an IP when not ready VMProvisioned condition is false with reason WaitingForIPAllocation + return !vSphereVM.Status.Ready && v1beta1conditions.IsFalse(vSphereVM, infrav1.VMProvisionedCondition) && v1beta1conditions.GetReason(vSphereVM, infrav1.VMProvisionedCondition) == infrav1.WaitingForIPAllocationReason }, } } diff --git a/test/infrastructure/vcsim/controllers/vspherevm_controller_test.go b/test/infrastructure/vcsim/controllers/vspherevm_controller_test.go index b1006c42db..41085b76b6 100644 --- a/test/infrastructure/vcsim/controllers/vspherevm_controller_test.go +++ b/test/infrastructure/vcsim/controllers/vspherevm_controller_test.go @@ -43,6 +43,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1" vmwarev1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/vmware/v1beta1" vcsimv1 "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/vcsim/api/v1alpha1" + inmemorybackend "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/vcsim/controllers/backends/inmemory" ) var ( @@ -55,10 +56,12 @@ var ( func init() { // scheme used for operating on the management cluster. _ = clusterv1.AddToScheme(scheme) + _ = clusterv1beta1.AddToScheme(scheme) _ = infrav1.AddToScheme(scheme) _ = vmwarev1.AddToScheme(scheme) _ = vmoprv1.AddToScheme(scheme) _ = vcsimv1.AddToScheme(scheme) + _ = corev1.AddToScheme(scheme) // scheme used for operating on the cloud resource. 
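// The cloud scheme registered next backs the in-memory workload-cluster client where the
// backend stores its conditionsTracker objects (VSphereVMs in these tests). The tests
// further down read that tracker back and assert on the conditions now exported from the
// inmemorybackend package; a small helper capturing that pattern could look like the
// sketch below. Helper name and import paths are illustrative only; the conditions
// package alias matches the one already used in this file, but its exact path depends on
// the cluster-api version in go.mod.

import (
	"context"

	. "github.com/onsi/gomega"
	corev1 "k8s.io/api/core/v1"
	clusterv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1"
	v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions"
	"sigs.k8s.io/controller-runtime/pkg/client"

	infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1"
)

func assertTrackerCondition(ctx context.Context, g *WithT, inmemoryClient client.Client, vm *infrav1.VSphereVM, cond clusterv1beta1.ConditionType, expected corev1.ConditionStatus) {
	// Read the conditionsTracker stored by the backend for this VM.
	conditionsTracker := &infrav1.VSphereVM{}
	g.Expect(inmemoryClient.Get(ctx, client.ObjectKeyFromObject(vm), conditionsTracker)).To(Succeed())

	// Assert on the backend-owned condition, e.g. inmemorybackend.VMProvisionedCondition.
	c := v1beta1conditions.Get(conditionsTracker, cond)
	g.Expect(c).ToNot(BeNil())
	g.Expect(c.Status).To(Equal(expected))
}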
_ = infrav1.AddToScheme(cloudScheme) @@ -79,27 +82,27 @@ func Test_Reconcile_VSphereVM(t *testing.T) { }, } - cluster := &clusterv1.Cluster{ + cluster := &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Namespace: "foo", Name: "bar", UID: "bar", }, - Spec: clusterv1.ClusterSpec{ - InfrastructureRef: clusterv1.ContractVersionedObjectReference{ - APIGroup: infrav1.GroupVersion.Group, - Kind: "VSphereCluster", - Name: vsphereCluster.Name, + Spec: clusterv1beta1.ClusterSpec{ + InfrastructureRef: &corev1.ObjectReference{ + APIVersion: infrav1.GroupVersion.String(), + Kind: "VSphereCluster", + Name: vsphereCluster.Name, }, }, } - machine := &clusterv1.Machine{ + machine := &clusterv1beta1.Machine{ ObjectMeta: metav1.ObjectMeta{ Namespace: "foo", Name: "bar", Labels: map[string]string{ - clusterv1.ClusterNameLabel: cluster.Name, + clusterv1beta1.ClusterNameLabel: cluster.Name, }, }, } @@ -184,10 +187,10 @@ func Test_Reconcile_VSphereVM(t *testing.T) { err = inmemoryClient.Get(ctx, client.ObjectKeyFromObject(vSphereVM), conditionsTracker) g.Expect(err).ToNot(HaveOccurred()) - c := v1beta1conditions.Get(conditionsTracker, VMProvisionedCondition) + c := v1beta1conditions.Get(conditionsTracker, inmemorybackend.VMProvisionedCondition) g.Expect(c.Status).To(Equal(corev1.ConditionFalse)) g.Expect(c.Severity).To(Equal(clusterv1beta1.ConditionSeverityInfo)) - g.Expect(c.Reason).To(Equal(WaitingControlPlaneInitializedReason)) + g.Expect(c.Reason).To(Equal(inmemorybackend.WaitingControlPlaneInitializedReason)) }) t.Run("VSphereMachine provisioned gets a node (worker)", func(t *testing.T) { @@ -201,31 +204,31 @@ func Test_Reconcile_VSphereVM(t *testing.T) { }, } - cluster := &clusterv1.Cluster{ + cluster := &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Namespace: "foo", Name: "bar", UID: "bar", }, - Spec: clusterv1.ClusterSpec{ - InfrastructureRef: clusterv1.ContractVersionedObjectReference{ - APIGroup: infrav1.GroupVersion.Group, - Kind: "VSphereCluster", - Name: vsphereCluster.Name, + Spec: clusterv1beta1.ClusterSpec{ + InfrastructureRef: &corev1.ObjectReference{ + APIVersion: infrav1.GroupVersion.String(), + Kind: "VSphereCluster", + Name: vsphereCluster.Name, }, }, } - machine := &clusterv1.Machine{ + machine := &clusterv1beta1.Machine{ ObjectMeta: metav1.ObjectMeta{ Namespace: "foo", Name: "bar", Labels: map[string]string{ - clusterv1.ClusterNameLabel: cluster.Name, + clusterv1beta1.ClusterNameLabel: cluster.Name, }, }, - Spec: clusterv1.MachineSpec{ - Bootstrap: clusterv1.Bootstrap{ + Spec: clusterv1beta1.MachineSpec{ + Bootstrap: clusterv1beta1.Bootstrap{ DataSecretName: ptr.To("foo"), // this unblocks node provisioning }, }, @@ -305,7 +308,7 @@ func Test_Reconcile_VSphereVM(t *testing.T) { } // Reconcile - nodeStartupDuration = 0 * time.Second + inmemorybackend.NodeStartupDuration = 0 * time.Second res, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{ Namespace: vSphereVM.Namespace, @@ -320,7 +323,7 @@ func Test_Reconcile_VSphereVM(t *testing.T) { err = inmemoryClient.Get(ctx, client.ObjectKeyFromObject(vSphereVM), conditionsTracker) g.Expect(err).ToNot(HaveOccurred()) - c := v1beta1conditions.Get(conditionsTracker, NodeProvisionedCondition) + c := v1beta1conditions.Get(conditionsTracker, inmemorybackend.NodeProvisionedCondition) g.Expect(c.Status).To(Equal(corev1.ConditionTrue)) }) } diff --git a/test/infrastructure/vcsim/main.go b/test/infrastructure/vcsim/main.go index 578e665a59..ddcf8b1324 100644 --- a/test/infrastructure/vcsim/main.go +++ 
b/test/infrastructure/vcsim/main.go @@ -55,6 +55,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" ctrlmgr "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/webhook" infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1" vmwarev1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/vmware/v1beta1" @@ -84,6 +85,7 @@ var ( healthAddr string managerOptions = flags.ManagerOptions{} logOptions = logs.NewOptions() + webhookPort int // vcsim specific flags. vSphereVMConcurrency int virtualMachineConcurrency int @@ -169,6 +171,9 @@ func InitFlags(fs *pflag.FlagSet) { fs.StringVar(&healthAddr, "health-addr", ":9440", "The address the health endpoint binds to.") + fs.IntVar(&webhookPort, "webhook-port", 9443, + "Webhook Server port") + flags.AddManagerOptions(fs, &managerOptions) feature.MutableGates.AddFlag(fs) @@ -245,13 +250,11 @@ func main() { }, }, }, - // WebhookServer: webhook.NewServer( - // webhook.Options{ - // Port: webhookPort, - // CertDir: webhookCertDir, - // TLSOpts: tlsOptionOverrides, - // }, - // ), + WebhookServer: webhook.NewServer( + webhook.Options{ + Port: webhookPort, + }, + ), } mgr, err := ctrl.NewManager(restConfig, ctrlOptions)
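// For reference, the webhook wiring added above shown in isolation: the manager now always
// constructs a webhook server, with only the port taken from the new --webhook-port flag
// (defaulting to 9443); the certificate directory and TLS options are left to
// controller-runtime defaults, unlike the previously commented-out variant that also set
// CertDir and TLSOpts. A minimal standalone sketch under those assumptions:

package main

import (
	"os"

	"github.com/spf13/pflag"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/webhook"
)

func main() {
	var webhookPort int
	pflag.IntVar(&webhookPort, "webhook-port", 9443, "Webhook Server port")
	pflag.Parse()

	// Build a manager whose webhook server listens on the configured port.
	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
		WebhookServer: webhook.NewServer(webhook.Options{
			Port: webhookPort,
		}),
	})
	if err != nil {
		os.Exit(1)
	}

	// In the real binary, webhooks and controllers are registered on mgr before
	// calling mgr.Start(ctrl.SetupSignalHandler()).
	_ = mgr
}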