diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index f7476667..57b1f798 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -31,9 +31,6 @@ jobs: with: go-version: '1.22' - - name: Apply Orange patch - run: | - cp -ar patches/* . - name: Log in to registry run: echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u ${{ github.actor }} --password-stdin diff --git a/.gitignore b/.gitignore index 71301c09..d3716d87 100644 --- a/.gitignore +++ b/.gitignore @@ -3,3 +3,4 @@ bin/* out/* tools/* cover.out +.tiltbuild \ No newline at end of file diff --git a/Makefile b/Makefile index 085fc716..200e8dd5 100644 --- a/Makefile +++ b/Makefile @@ -202,7 +202,7 @@ release: docker-build docker-push ## Build release images and push to registry. .PHONY: release-manifests release-manifests: kustomize ## Generate release manifests e.g. CRD, RBAC etc. - sed -e "s/__VERSION__/$(VERSION)/g" config/manager/manager.yaml.template > config/manager/manager.yaml + sed -e "s#__REGISTRY__#$(REGISTRY)#g" -e "s/__CAPVCD_IMG__/$(CAPVCD_IMG)/g" -e "s/__VERSION__/$(VERSION)/g" config/manager/manager.yaml.template > config/manager/manager.yaml $(KUSTOMIZE) build config/default > templates/infrastructure-components.yaml .PHONY: release-prep diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 092fe361..9c7ca5f3 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -39,18 +39,18 @@ rules: - get - list - watch -- apiGroups: - - bootstrap.cluster.x-k8s.io - resources: - - kubeadmconfigtemplates - verbs: - - create - - delete - - get - - list - - patch - - update - - watch +# - apiGroups: +# - bootstrap.cluster.x-k8s.io +# resources: +# - kubeadmconfigtemplates +# verbs: +# - create +# - delete +# - get +# - list +# - patch +# - update +# - watch - apiGroups: - cluster.x-k8s.io resources: @@ -81,18 +81,18 @@ rules: - get - list - watch -- apiGroups: - - controlplane.cluster.x-k8s.io - resources: - - kubeadmcontrolplanes - verbs: - - create - - delete - - get - - list - - patch - - update - - watch +# - apiGroups: +# - controlplane.cluster.x-k8s.io +# resources: +# - kubeadmcontrolplanes +# verbs: +# - create +# - delete +# - get +# - list +# - patch +# - update +# - watch - apiGroups: - infrastructure.cluster.x-k8s.io resources: diff --git a/controllers/capi_objects_utils.go b/controllers/capi_objects_utils.go index f070ef76..14dee111 100644 --- a/controllers/capi_objects_utils.go +++ b/controllers/capi_objects_utils.go @@ -6,14 +6,14 @@ import ( "github.com/pkg/errors" "github.com/vmware/cloud-provider-for-cloud-director/pkg/vcdsdk" infrav1 "github.com/vmware/cluster-api-provider-cloud-director/api/v1beta2" - rdeType "github.com/vmware/cluster-api-provider-cloud-director/pkg/vcdtypes/rde_type_1_1_0" + // rdeType "github.com/vmware/cluster-api-provider-cloud-director/pkg/vcdtypes/rde_type_1_1_0" "github.com/vmware/go-vcloud-director/v2/govcd" - "gopkg.in/yaml.v2" + // "gopkg.in/yaml.v2" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" - kcpv1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" + // "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" + // kcpv1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" addonsv1 "sigs.k8s.io/cluster-api/exp/addons/api/v1beta1" "sigs.k8s.io/controller-runtime/pkg/client" "strings" @@ -33,98 +33,98 @@ func getTKGVersion(cluster *clusterv1.Cluster) string { // // keys. 
The function moves name and namespace from "objectmeta" key to "metadata" key and moves all the keys from "typemeta" // key to objMap -func filterTypeMetaAndObjectMetaFromK8sObjectMap(objMap map[string]interface{}) error { - if _, ok := objMap["typemeta"]; ok { - typeMetaMap, ok := objMap["typemeta"].(map[interface{}]interface{}) - if !ok { - return fmt.Errorf("failed to convert typemeta [%v] to map[interface{}]interface{}", objMap["typemeta"]) - } - // move contents of typeMetaMap to objMap. This preserves keys like apiVersion and Kind - for k, v := range typeMetaMap { - objMap[k.(string)] = v - } - delete(objMap, "typemeta") - } - - if _, ok := objMap["objectmeta"]; ok { - objectMetaMap, ok := objMap["objectmeta"].(map[interface{}]interface{}) - if !ok { - return fmt.Errorf("failed to convert objectmeta [%v] to map[interface{}]interface{}", objMap["objectmeta"]) - } - // remove all keys from objectMetaMap except for name and namespace. - for k := range objectMetaMap { - if k.(string) != "name" && k.(string) != "namespace" { - delete(objectMetaMap, k) - } - } - // preserve name and namespace of the object as part of "metadata" - objMap["metadata"] = objectMetaMap - delete(objMap, "objectmeta") - } - return nil -} - -func yamlWithoutStatus(obj interface{}) (string, error) { - // Redact the password and refresh token - // get yaml string for obj - objInByteArr, err := yaml.Marshal(obj) - if err != nil { - return "", fmt.Errorf("failed to marshal object: [%v]", err) - } - - objMap := make(map[string]interface{}) - if err := yaml.Unmarshal(objInByteArr, &objMap); err != nil { - return "", fmt.Errorf("failed to unmarshal object to map[string]interface{}: [%v]", err) - } - - // delete status key - if _, ok := objMap["status"]; ok { - delete(objMap, "status") - } - - err = filterTypeMetaAndObjectMetaFromK8sObjectMap(objMap) - if err != nil { - return "", fmt.Errorf("failed to remove type meta and object meta from kubernetes object [%v]: [%v]", objMap, err) - } - - // marshal back to a string - output, err := yaml.Marshal(objMap) - if err != nil { - return "", fmt.Errorf("failed to marshal modified object: [%v]", err) - } - return string(output), nil -} - -func getK8sObjectStatus(obj interface{}) (string, error) { - // Redact the password and refresh token - // get yaml string for obj - objInByteArr, err := yaml.Marshal(obj) - if err != nil { - return "", fmt.Errorf("failed to marshal object: [%v]", err) - } - - objMap := make(map[string]interface{}) - if err := yaml.Unmarshal(objInByteArr, &objMap); err != nil { - return "", fmt.Errorf("failed to unmarshal object to map[string]interface{}: [%v]", err) - } - - // delete spec key - if _, ok := objMap["spec"]; ok { - delete(objMap, "spec") - } - - err = filterTypeMetaAndObjectMetaFromK8sObjectMap(objMap) - if err != nil { - return "", fmt.Errorf("failed to remove type meta and object meta from kubernetes object [%v]: [%v]", objMap, err) - } - - // marshal back to a string - output, err := yaml.Marshal(objMap) - if err != nil { - return "", fmt.Errorf("failed to marshal modified object: [%v]", err) - } - return string(output), nil -} +// func filterTypeMetaAndObjectMetaFromK8sObjectMap(objMap map[string]interface{}) error { +// if _, ok := objMap["typemeta"]; ok { +// typeMetaMap, ok := objMap["typemeta"].(map[interface{}]interface{}) +// if !ok { +// return fmt.Errorf("failed to convert typemeta [%v] to map[interface{}]interface{}", objMap["typemeta"]) +// } +// // move contents of typeMetaMap to objMap. 
This preserves keys like apiVersion and Kind +// for k, v := range typeMetaMap { +// objMap[k.(string)] = v +// } +// delete(objMap, "typemeta") +// } +// +// if _, ok := objMap["objectmeta"]; ok { +// objectMetaMap, ok := objMap["objectmeta"].(map[interface{}]interface{}) +// if !ok { +// return fmt.Errorf("failed to convert objectmeta [%v] to map[interface{}]interface{}", objMap["objectmeta"]) +// } +// // remove all keys from objectMetaMap except for name and namespace. +// for k := range objectMetaMap { +// if k.(string) != "name" && k.(string) != "namespace" { +// delete(objectMetaMap, k) +// } +// } +// // preserve name and namespace of the object as part of "metadata" +// objMap["metadata"] = objectMetaMap +// delete(objMap, "objectmeta") +// } +// return nil +// } + +// func yamlWithoutStatus(obj interface{}) (string, error) { +// // Redact the password and refresh token +// // get yaml string for obj +// objInByteArr, err := yaml.Marshal(obj) +// if err != nil { +// return "", fmt.Errorf("failed to marshal object: [%v]", err) +// } +// +// objMap := make(map[string]interface{}) +// if err := yaml.Unmarshal(objInByteArr, &objMap); err != nil { +// return "", fmt.Errorf("failed to unmarshal object to map[string]interface{}: [%v]", err) +// } +// +// // delete status key +// if _, ok := objMap["status"]; ok { +// delete(objMap, "status") +// } +// +// err = filterTypeMetaAndObjectMetaFromK8sObjectMap(objMap) +// if err != nil { +// return "", fmt.Errorf("failed to remove type meta and object meta from kubernetes object [%v]: [%v]", objMap, err) +// } +// +// // marshal back to a string +// output, err := yaml.Marshal(objMap) +// if err != nil { +// return "", fmt.Errorf("failed to marshal modified object: [%v]", err) +// } +// return string(output), nil +// } + +// func getK8sObjectStatus(obj interface{}) (string, error) { +// // Redact the password and refresh token +// // get yaml string for obj +// objInByteArr, err := yaml.Marshal(obj) +// if err != nil { +// return "", fmt.Errorf("failed to marshal object: [%v]", err) +// } +// +// objMap := make(map[string]interface{}) +// if err := yaml.Unmarshal(objInByteArr, &objMap); err != nil { +// return "", fmt.Errorf("failed to unmarshal object to map[string]interface{}: [%v]", err) +// } +// +// // delete spec key +// if _, ok := objMap["spec"]; ok { +// delete(objMap, "spec") +// } +// +// err = filterTypeMetaAndObjectMetaFromK8sObjectMap(objMap) +// if err != nil { +// return "", fmt.Errorf("failed to remove type meta and object meta from kubernetes object [%v]: [%v]", objMap, err) +// } +// +// // marshal back to a string +// output, err := yaml.Marshal(objMap) +// if err != nil { +// return "", fmt.Errorf("failed to marshal modified object: [%v]", err) +// } +// return string(output), nil +// } func getOrgByName(client *vcdsdk.Client, orgName string) (*govcd.Org, error) { org, err := client.VCDClient.GetOrgByName(orgName) @@ -309,15 +309,15 @@ func getAllMachineDeploymentsForCluster(ctx context.Context, cli client.Client, return mdList, nil } -func getAllKubeadmControlPlaneForCluster(ctx context.Context, cli client.Client, c clusterv1.Cluster) (*kcpv1.KubeadmControlPlaneList, error) { - kcpListLabels := map[string]string{clusterv1.ClusterNameLabel: c.Name} - kcpList := &kcpv1.KubeadmControlPlaneList{} - - if err := cli.List(ctx, kcpList, client.InNamespace(c.Namespace), client.MatchingLabels(kcpListLabels)); err != nil { - return nil, errors.Wrapf(err, "error getting all kubeadm control planes for the cluster [%s]", c.Name) - } - 
return kcpList, nil -} +// func getAllKubeadmControlPlaneForCluster(ctx context.Context, cli client.Client, c clusterv1.Cluster) (*kcpv1.KubeadmControlPlaneList, error) { +// kcpListLabels := map[string]string{clusterv1.ClusterNameLabel: c.Name} +// kcpList := &kcpv1.KubeadmControlPlaneList{} +// +// if err := cli.List(ctx, kcpList, client.InNamespace(c.Namespace), client.MatchingLabels(kcpListLabels)); err != nil { +// return nil, errors.Wrapf(err, "error getting all kubeadm control planes for the cluster [%s]", c.Name) +// } +// return kcpList, nil +// } func getAllCRSBindingForCluster(ctx context.Context, cli client.Client, c clusterv1.Cluster) (*addonsv1.ClusterResourceSetBindingList, error) { @@ -331,273 +331,273 @@ func getAllCRSBindingForCluster(ctx context.Context, cli client.Client, return crsBindingList, nil } -func getVCDMachineTemplateFromKCP(ctx context.Context, cli client.Client, kcp kcpv1.KubeadmControlPlane) (*infrav1.VCDMachineTemplate, error) { - vcdMachineTemplateRef := kcp.Spec.MachineTemplate.InfrastructureRef - vcdMachineTemplate := &infrav1.VCDMachineTemplate{} - vcdMachineTemplateKey := types.NamespacedName{ - Namespace: vcdMachineTemplateRef.Namespace, - Name: vcdMachineTemplateRef.Name, - } - if err := cli.Get(ctx, vcdMachineTemplateKey, vcdMachineTemplate); err != nil { - return nil, fmt.Errorf("failed to get VCDMachineTemplate by name [%s] from KCP [%s]: [%v]", vcdMachineTemplateRef.Name, kcp.Name, err) - } - - return vcdMachineTemplate, nil -} - -func getVCDMachineTemplateFromMachineDeployment(ctx context.Context, cli client.Client, md clusterv1.MachineDeployment) (*infrav1.VCDMachineTemplate, error) { - vcdMachineTemplateRef := md.Spec.Template.Spec.InfrastructureRef - vcdMachineTemplate := &infrav1.VCDMachineTemplate{} - vcdMachineTemplateKey := client.ObjectKey{ - Namespace: vcdMachineTemplateRef.Namespace, - Name: vcdMachineTemplateRef.Name, - } - if err := cli.Get(ctx, vcdMachineTemplateKey, vcdMachineTemplate); err != nil { - return nil, fmt.Errorf("failed to get VCDMachineTemplate by name [%s] from machine deployment [%s]: [%v]", vcdMachineTemplateRef.Name, md.Name, err) - } - - return vcdMachineTemplate, nil -} - -func getMachineListFromCluster(ctx context.Context, cli client.Client, cluster clusterv1.Cluster) (*clusterv1.MachineList, error) { - machineListLabels := map[string]string{clusterv1.ClusterNameLabel: cluster.Name} - machineList := &clusterv1.MachineList{} - if err := cli.List(ctx, machineList, client.InNamespace(cluster.Namespace), client.MatchingLabels(machineListLabels)); err != nil { - return nil, errors.Wrapf(err, "error getting machine list for the cluster [%s]", cluster.Name) - } - return machineList, nil -} - -func getVCDMachineTemplateByObjRef(ctx context.Context, cli client.Client, objRef v1.ObjectReference) (*infrav1.VCDMachineTemplate, error) { - vcdMachineTemplate := &infrav1.VCDMachineTemplate{} - vcdMachineTemplateKey := client.ObjectKey{ - Namespace: objRef.Namespace, - Name: objRef.Name, - } - if err := cli.Get(ctx, vcdMachineTemplateKey, vcdMachineTemplate); err != nil { - return nil, fmt.Errorf("failed to get VCDMachineTemplate by ObjectReference [%v]: [%v]", objRef, err) - } - - return vcdMachineTemplate, nil -} - -func getKubeadmConfigTemplateByObjRef(ctx context.Context, cli client.Client, objRef v1.ObjectReference) (*v1beta1.KubeadmConfigTemplate, error) { - kubeadmConfigTemplate := &v1beta1.KubeadmConfigTemplate{} - kubeadmConfigTemplateKey := client.ObjectKey{ - Namespace: objRef.Namespace, - Name: objRef.Name, - } - 
if err := cli.Get(ctx, kubeadmConfigTemplateKey, kubeadmConfigTemplate); err != nil { - return nil, fmt.Errorf("failed to get KubeadmConfigTemplate by ObjectReference [%v]: [%v]", objRef, err) - } - - return kubeadmConfigTemplate, nil -} - -func getAllMachinesInMachineDeployment(ctx context.Context, cli client.Client, machineDeployment clusterv1.MachineDeployment) (*clusterv1.MachineList, error) { - machineListLabels := map[string]string{clusterv1.MachineDeploymentNameLabel: machineDeployment.Name} - machineList := &clusterv1.MachineList{} - if err := cli.List(ctx, machineList, client.InNamespace(machineDeployment.Namespace), client.MatchingLabels(machineListLabels)); err != nil { - return nil, errors.Wrapf(err, "error getting machine list for the cluster [%s]", machineDeployment.Name) - } - return machineList, nil -} - -func getAllMachinesInKCP(ctx context.Context, cli client.Client, kcp kcpv1.KubeadmControlPlane, clusterName string) ([]clusterv1.Machine, error) { - machineListLabels := map[string]string{clusterv1.ClusterNameLabel: clusterName} - machineList := &clusterv1.MachineList{} - if err := cli.List(ctx, machineList, client.InNamespace(kcp.Namespace), client.MatchingLabels(machineListLabels)); err != nil { - return nil, errors.Wrapf(err, "error getting machine list associated with KCP [%s]: [%v]", kcp.Name, err) - } - // TODO find a better way to find all machines in KCP - machinesWithKCPOwnerRef := make([]clusterv1.Machine, 0) - for _, m := range machineList.Items { - for _, ref := range m.OwnerReferences { - if ref.Kind == "KubeadmControlPlane" && ref.Name == kcp.Name { - machinesWithKCPOwnerRef = append(machinesWithKCPOwnerRef, m) - break - } - } - } - return machinesWithKCPOwnerRef, nil -} - -func getNodePoolList(ctx context.Context, cli client.Client, cluster clusterv1.Cluster) ([]rdeType.NodePool, error) { - nodePoolList := make([]rdeType.NodePool, 0) - mds, err := getAllMachineDeploymentsForCluster(ctx, cli, cluster) - if err != nil { - return nil, fmt.Errorf("failed to query all machine deployments for the cluster [%s]: [%v]", cluster.Name, err) - } - for _, md := range mds.Items { - // create a node pool for each machine deployment - vcdMachineTemplate, err := getVCDMachineTemplateFromMachineDeployment(ctx, cli, md) - if err != nil { - return nil, fmt.Errorf("failed to get VCDMachineTemplate associated with the MachineDeployment [%s]: [%v]", md.Name, err) - } - // query all machines in machine deployment using machine deployment label - machineList, err := getAllMachinesInMachineDeployment(ctx, cli, md) - if err != nil { - return nil, fmt.Errorf("failed to get MachineList for MachineDeployment [%s]: [%v]", md.Name, err) - } - nodeStatusMap := make(map[string]string) - for _, machine := range machineList.Items { - nodeStatusMap[machine.Name] = machine.Status.Phase - } - desiredReplicasCount := int32(0) - if md.Spec.Replicas != nil { - desiredReplicasCount = *md.Spec.Replicas - } - nodePool := rdeType.NodePool{ - Name: md.Name, - SizingPolicy: vcdMachineTemplate.Spec.Template.Spec.SizingPolicy, - PlacementPolicy: vcdMachineTemplate.Spec.Template.Spec.PlacementPolicy, - NvidiaGpuEnabled: vcdMachineTemplate.Spec.Template.Spec.EnableNvidiaGPU, - StorageProfile: vcdMachineTemplate.Spec.Template.Spec.StorageProfile, - DiskSizeMb: int32(vcdMachineTemplate.Spec.Template.Spec.DiskSize.Value() / (1024 * 1024)), - DesiredReplicas: desiredReplicasCount, - AvailableReplicas: md.Status.ReadyReplicas, - NodeStatus: nodeStatusMap, - } - nodePoolList = append(nodePoolList, nodePool) - } - - 
kcpList, err := getAllKubeadmControlPlaneForCluster(ctx, cli, cluster) - if err != nil { - return nil, fmt.Errorf("failed to query all KubeadmControlPlane objects for the cluster [%s]: [%v]", cluster.Name, err) - } - for _, kcp := range kcpList.Items { - // create a node pool for each kcp - vcdMachineTemplate, err := getVCDMachineTemplateFromKCP(ctx, cli, kcp) - if err != nil { - return nil, fmt.Errorf("failed to get VCDMachineTemplate associated with KubeadmControlPlane [%s]: [%v]", kcp.Name, err) - } - // query all machines with the kcp - machineArr, err := getAllMachinesInKCP(ctx, cli, kcp, cluster.Name) - if err != nil { - return nil, fmt.Errorf("failed to get Machines associated with the KubeadmControlPlane [%s]: [%v]", kcp.Name, err) - } - nodeStatusMap := make(map[string]string) - for _, machine := range machineArr { - nodeStatusMap[machine.Name] = machine.Status.Phase - } - desiredReplicaCount := int32(0) - if kcp.Spec.Replicas != nil { - desiredReplicaCount = *kcp.Spec.Replicas - } - nodePool := rdeType.NodePool{ - Name: kcp.Name, - SizingPolicy: vcdMachineTemplate.Spec.Template.Spec.SizingPolicy, - PlacementPolicy: vcdMachineTemplate.Spec.Template.Spec.PlacementPolicy, - NvidiaGpuEnabled: vcdMachineTemplate.Spec.Template.Spec.EnableNvidiaGPU, - StorageProfile: vcdMachineTemplate.Spec.Template.Spec.StorageProfile, - DiskSizeMb: int32(vcdMachineTemplate.Spec.Template.Spec.DiskSize.Value() / (1024 * 1024)), - DesiredReplicas: desiredReplicaCount, - AvailableReplicas: kcp.Status.ReadyReplicas, - NodeStatus: nodeStatusMap, - } - nodePoolList = append(nodePoolList, nodePool) - } - return nodePoolList, nil -} - -func getK8sClusterObjects(ctx context.Context, cli client.Client, cluster clusterv1.Cluster, vcdCluster infrav1.VCDCluster) ([]interface{}, error) { - // Redacting username, password and refresh token from the UserCredentialsContext for security purposes. 
- vcdCluster.Spec.UserCredentialsContext.Username = "***REDACTED***" - vcdCluster.Spec.UserCredentialsContext.Password = "***REDACTED***" - vcdCluster.Spec.UserCredentialsContext.RefreshToken = "***REDACTED***" - capiYamlObjects := []interface{}{ - cluster, - vcdCluster, - } - - kcpList, err := getAllKubeadmControlPlaneForCluster(ctx, cli, cluster) - if err != nil { - return nil, fmt.Errorf("failed to get all KCPs from Cluster object: [%v]", err) - } - - mdList, err := getAllMachineDeploymentsForCluster(ctx, cli, cluster) - if err != nil { - return nil, fmt.Errorf("failed to get all the MachineDeployments from Cluster: [%v]", err) - } - - vcdMachineTemplateNameToObjRef := make(map[string]v1.ObjectReference) - for _, kcp := range kcpList.Items { - vcdMachineTemplateNameToObjRef[kcp.Spec.MachineTemplate.InfrastructureRef.Name] = kcp.Spec.MachineTemplate.InfrastructureRef - } - - kubeadmConfigTemplateNameToObjRef := make(map[string]*v1.ObjectReference) - for _, md := range mdList.Items { - vcdMachineTemplateNameToObjRef[md.Spec.Template.Spec.InfrastructureRef.Name] = md.Spec.Template.Spec.InfrastructureRef - kubeadmConfigTemplateNameToObjRef[md.Spec.Template.Spec.Bootstrap.ConfigRef.Name] = md.Spec.Template.Spec.Bootstrap.ConfigRef - } - - vcdMachineTemplates := make([]*infrav1.VCDMachineTemplate, 0) - for _, objRef := range vcdMachineTemplateNameToObjRef { - vcdMachineTemplate, err := getVCDMachineTemplateByObjRef(ctx, cli, objRef) - if err != nil { - return nil, fmt.Errorf("failed to get VCDMachineTemplate by ObjectReference [%v]: [%v]", objRef, err) - } - vcdMachineTemplates = append(vcdMachineTemplates, vcdMachineTemplate) - } - - kubeadmConfigTemplates := make([]*v1beta1.KubeadmConfigTemplate, 0) - for _, objRef := range kubeadmConfigTemplateNameToObjRef { - kubeadmConifgTemplate, err := getKubeadmConfigTemplateByObjRef(ctx, cli, *objRef) - if err != nil { - return nil, fmt.Errorf("failed to get KubeadmConfigTemplate by ObjectReference [%v]: [%v]", objRef, err) - } - kubeadmConfigTemplates = append(kubeadmConfigTemplates, kubeadmConifgTemplate) - } - - // add objects - for _, vcdMachineTemplate := range vcdMachineTemplates { - capiYamlObjects = append(capiYamlObjects, *vcdMachineTemplate) - } - for _, kubeadmConfigTemplate := range kubeadmConfigTemplates { - capiYamlObjects = append(capiYamlObjects, *kubeadmConfigTemplate) - } - for _, kcp := range kcpList.Items { - capiYamlObjects = append(capiYamlObjects, kcp) - } - for _, md := range mdList.Items { - capiYamlObjects = append(capiYamlObjects, md) - } - return capiYamlObjects, nil -} - -func getCapiYaml(ctx context.Context, cli client.Client, cluster clusterv1.Cluster, vcdCluster infrav1.VCDCluster) (string, error) { - capiYamlObjects, err := getK8sClusterObjects(ctx, cli, cluster, vcdCluster) - if err != nil { - return "", fmt.Errorf("failed to get k8s objects related to cluster [%s]: [%v]", cluster.Name, err) - } - yamlObjects := make([]string, len(capiYamlObjects)) - for idx, obj := range capiYamlObjects { - yamlString, err := yamlWithoutStatus(obj) - if err != nil { - return "", fmt.Errorf("failed to convert object to yaml: [%v]", err) - } - yamlObjects[idx] = yamlString - } - - return strings.Join(yamlObjects, "---\n"), nil - -} - -func getCapiStatusYaml(ctx context.Context, cli client.Client, cluster clusterv1.Cluster, vcdCluster infrav1.VCDCluster) (string, error) { - capiYamlObjects, err := getK8sClusterObjects(ctx, cli, cluster, vcdCluster) - if err != nil { - return "", fmt.Errorf("failed to get k8s objects related to cluster 
[%s]: [%v]", cluster.Name, err) - } - yamlObjects := make([]string, len(capiYamlObjects)) - for idx, obj := range capiYamlObjects { - yamlStatusString, err := getK8sObjectStatus(obj) - if err != nil { - return "", fmt.Errorf("failed to extract status from kuberenets object: [%v]", err) - } - yamlObjects[idx] = yamlStatusString - } - return strings.Join(yamlObjects, "---\n"), nil -} +// func getVCDMachineTemplateFromKCP(ctx context.Context, cli client.Client, kcp kcpv1.KubeadmControlPlane) (*infrav1.VCDMachineTemplate, error) { +// vcdMachineTemplateRef := kcp.Spec.MachineTemplate.InfrastructureRef +// vcdMachineTemplate := &infrav1.VCDMachineTemplate{} +// vcdMachineTemplateKey := types.NamespacedName{ +// Namespace: vcdMachineTemplateRef.Namespace, +// Name: vcdMachineTemplateRef.Name, +// } +// if err := cli.Get(ctx, vcdMachineTemplateKey, vcdMachineTemplate); err != nil { +// return nil, fmt.Errorf("failed to get VCDMachineTemplate by name [%s] from KCP [%s]: [%v]", vcdMachineTemplateRef.Name, kcp.Name, err) +// } +// +// return vcdMachineTemplate, nil +// } + +// func getVCDMachineTemplateFromMachineDeployment(ctx context.Context, cli client.Client, md clusterv1.MachineDeployment) (*infrav1.VCDMachineTemplate, error) { +// vcdMachineTemplateRef := md.Spec.Template.Spec.InfrastructureRef +// vcdMachineTemplate := &infrav1.VCDMachineTemplate{} +// vcdMachineTemplateKey := client.ObjectKey{ +// Namespace: vcdMachineTemplateRef.Namespace, +// Name: vcdMachineTemplateRef.Name, +// } +// if err := cli.Get(ctx, vcdMachineTemplateKey, vcdMachineTemplate); err != nil { +// return nil, fmt.Errorf("failed to get VCDMachineTemplate by name [%s] from machine deployment [%s]: [%v]", vcdMachineTemplateRef.Name, md.Name, err) +// } +// +// return vcdMachineTemplate, nil +// } + +// func getMachineListFromCluster(ctx context.Context, cli client.Client, cluster clusterv1.Cluster) (*clusterv1.MachineList, error) { +// machineListLabels := map[string]string{clusterv1.ClusterNameLabel: cluster.Name} +// machineList := &clusterv1.MachineList{} +// if err := cli.List(ctx, machineList, client.InNamespace(cluster.Namespace), client.MatchingLabels(machineListLabels)); err != nil { +// return nil, errors.Wrapf(err, "error getting machine list for the cluster [%s]", cluster.Name) +// } +// return machineList, nil +// } + +// func getVCDMachineTemplateByObjRef(ctx context.Context, cli client.Client, objRef v1.ObjectReference) (*infrav1.VCDMachineTemplate, error) { +// vcdMachineTemplate := &infrav1.VCDMachineTemplate{} +// vcdMachineTemplateKey := client.ObjectKey{ +// Namespace: objRef.Namespace, +// Name: objRef.Name, +// } +// if err := cli.Get(ctx, vcdMachineTemplateKey, vcdMachineTemplate); err != nil { +// return nil, fmt.Errorf("failed to get VCDMachineTemplate by ObjectReference [%v]: [%v]", objRef, err) +// } +// +// return vcdMachineTemplate, nil +// } + +// func getKubeadmConfigTemplateByObjRef(ctx context.Context, cli client.Client, objRef v1.ObjectReference) (*v1beta1.KubeadmConfigTemplate, error) { +// kubeadmConfigTemplate := &v1beta1.KubeadmConfigTemplate{} +// kubeadmConfigTemplateKey := client.ObjectKey{ +// Namespace: objRef.Namespace, +// Name: objRef.Name, +// } +// if err := cli.Get(ctx, kubeadmConfigTemplateKey, kubeadmConfigTemplate); err != nil { +// return nil, fmt.Errorf("failed to get KubeadmConfigTemplate by ObjectReference [%v]: [%v]", objRef, err) +// } +// +// return kubeadmConfigTemplate, nil +// } + +// func getAllMachinesInMachineDeployment(ctx context.Context, cli client.Client, 
machineDeployment clusterv1.MachineDeployment) (*clusterv1.MachineList, error) { +// machineListLabels := map[string]string{clusterv1.MachineDeploymentNameLabel: machineDeployment.Name} +// machineList := &clusterv1.MachineList{} +// if err := cli.List(ctx, machineList, client.InNamespace(machineDeployment.Namespace), client.MatchingLabels(machineListLabels)); err != nil { +// return nil, errors.Wrapf(err, "error getting machine list for the cluster [%s]", machineDeployment.Name) +// } +// return machineList, nil +// } + +// func getAllMachinesInKCP(ctx context.Context, cli client.Client, kcp kcpv1.KubeadmControlPlane, clusterName string) ([]clusterv1.Machine, error) { +// machineListLabels := map[string]string{clusterv1.ClusterNameLabel: clusterName} +// machineList := &clusterv1.MachineList{} +// if err := cli.List(ctx, machineList, client.InNamespace(kcp.Namespace), client.MatchingLabels(machineListLabels)); err != nil { +// return nil, errors.Wrapf(err, "error getting machine list associated with KCP [%s]: [%v]", kcp.Name, err) +// } +// // TODO find a better way to find all machines in KCP +// machinesWithKCPOwnerRef := make([]clusterv1.Machine, 0) +// for _, m := range machineList.Items { +// for _, ref := range m.OwnerReferences { +// if ref.Kind == "KubeadmControlPlane" && ref.Name == kcp.Name { +// machinesWithKCPOwnerRef = append(machinesWithKCPOwnerRef, m) +// break +// } +// } +// } +// return machinesWithKCPOwnerRef, nil +// } + +// func getNodePoolList(ctx context.Context, cli client.Client, cluster clusterv1.Cluster) ([]rdeType.NodePool, error) { +// nodePoolList := make([]rdeType.NodePool, 0) +// mds, err := getAllMachineDeploymentsForCluster(ctx, cli, cluster) +// if err != nil { +// return nil, fmt.Errorf("failed to query all machine deployments for the cluster [%s]: [%v]", cluster.Name, err) +// } +// for _, md := range mds.Items { +// // create a node pool for each machine deployment +// vcdMachineTemplate, err := getVCDMachineTemplateFromMachineDeployment(ctx, cli, md) +// if err != nil { +// return nil, fmt.Errorf("failed to get VCDMachineTemplate associated with the MachineDeployment [%s]: [%v]", md.Name, err) +// } +// // query all machines in machine deployment using machine deployment label +// machineList, err := getAllMachinesInMachineDeployment(ctx, cli, md) +// if err != nil { +// return nil, fmt.Errorf("failed to get MachineList for MachineDeployment [%s]: [%v]", md.Name, err) +// } +// nodeStatusMap := make(map[string]string) +// for _, machine := range machineList.Items { +// nodeStatusMap[machine.Name] = machine.Status.Phase +// } +// desiredReplicasCount := int32(0) +// if md.Spec.Replicas != nil { +// desiredReplicasCount = *md.Spec.Replicas +// } +// nodePool := rdeType.NodePool{ +// Name: md.Name, +// SizingPolicy: vcdMachineTemplate.Spec.Template.Spec.SizingPolicy, +// PlacementPolicy: vcdMachineTemplate.Spec.Template.Spec.PlacementPolicy, +// NvidiaGpuEnabled: vcdMachineTemplate.Spec.Template.Spec.EnableNvidiaGPU, +// StorageProfile: vcdMachineTemplate.Spec.Template.Spec.StorageProfile, +// DiskSizeMb: int32(vcdMachineTemplate.Spec.Template.Spec.DiskSize.Value() / (1024 * 1024)), +// DesiredReplicas: desiredReplicasCount, +// AvailableReplicas: md.Status.ReadyReplicas, +// NodeStatus: nodeStatusMap, +// } +// nodePoolList = append(nodePoolList, nodePool) +// } +// +// kcpList, err := getAllKubeadmControlPlaneForCluster(ctx, cli, cluster) +// if err != nil { +// return nil, fmt.Errorf("failed to query all KubeadmControlPlane objects for the cluster 
[%s]: [%v]", cluster.Name, err) +// } +// for _, kcp := range kcpList.Items { +// // create a node pool for each kcp +// vcdMachineTemplate, err := getVCDMachineTemplateFromKCP(ctx, cli, kcp) +// if err != nil { +// return nil, fmt.Errorf("failed to get VCDMachineTemplate associated with KubeadmControlPlane [%s]: [%v]", kcp.Name, err) +// } +// // query all machines with the kcp +// machineArr, err := getAllMachinesInKCP(ctx, cli, kcp, cluster.Name) +// if err != nil { +// return nil, fmt.Errorf("failed to get Machines associated with the KubeadmControlPlane [%s]: [%v]", kcp.Name, err) +// } +// nodeStatusMap := make(map[string]string) +// for _, machine := range machineArr { +// nodeStatusMap[machine.Name] = machine.Status.Phase +// } +// desiredReplicaCount := int32(0) +// if kcp.Spec.Replicas != nil { +// desiredReplicaCount = *kcp.Spec.Replicas +// } +// nodePool := rdeType.NodePool{ +// Name: kcp.Name, +// SizingPolicy: vcdMachineTemplate.Spec.Template.Spec.SizingPolicy, +// PlacementPolicy: vcdMachineTemplate.Spec.Template.Spec.PlacementPolicy, +// NvidiaGpuEnabled: vcdMachineTemplate.Spec.Template.Spec.EnableNvidiaGPU, +// StorageProfile: vcdMachineTemplate.Spec.Template.Spec.StorageProfile, +// DiskSizeMb: int32(vcdMachineTemplate.Spec.Template.Spec.DiskSize.Value() / (1024 * 1024)), +// DesiredReplicas: desiredReplicaCount, +// AvailableReplicas: kcp.Status.ReadyReplicas, +// NodeStatus: nodeStatusMap, +// } +// nodePoolList = append(nodePoolList, nodePool) +// } +// return nodePoolList, nil +// } + +// func getK8sClusterObjects(ctx context.Context, cli client.Client, cluster clusterv1.Cluster, vcdCluster infrav1.VCDCluster) ([]interface{}, error) { +// // Redacting username, password and refresh token from the UserCredentialsContext for security purposes. 
+// vcdCluster.Spec.UserCredentialsContext.Username = "***REDACTED***" +// vcdCluster.Spec.UserCredentialsContext.Password = "***REDACTED***" +// vcdCluster.Spec.UserCredentialsContext.RefreshToken = "***REDACTED***" +// capiYamlObjects := []interface{}{ +// cluster, +// vcdCluster, +// } +// +// kcpList, err := getAllKubeadmControlPlaneForCluster(ctx, cli, cluster) +// if err != nil { +// return nil, fmt.Errorf("failed to get all KCPs from Cluster object: [%v]", err) +// } +// +// mdList, err := getAllMachineDeploymentsForCluster(ctx, cli, cluster) +// if err != nil { +// return nil, fmt.Errorf("failed to get all the MachineDeployments from Cluster: [%v]", err) +// } +// +// vcdMachineTemplateNameToObjRef := make(map[string]v1.ObjectReference) +// for _, kcp := range kcpList.Items { +// vcdMachineTemplateNameToObjRef[kcp.Spec.MachineTemplate.InfrastructureRef.Name] = kcp.Spec.MachineTemplate.InfrastructureRef +// } +// +// kubeadmConfigTemplateNameToObjRef := make(map[string]*v1.ObjectReference) +// for _, md := range mdList.Items { +// vcdMachineTemplateNameToObjRef[md.Spec.Template.Spec.InfrastructureRef.Name] = md.Spec.Template.Spec.InfrastructureRef +// kubeadmConfigTemplateNameToObjRef[md.Spec.Template.Spec.Bootstrap.ConfigRef.Name] = md.Spec.Template.Spec.Bootstrap.ConfigRef +// } +// +// vcdMachineTemplates := make([]*infrav1.VCDMachineTemplate, 0) +// for _, objRef := range vcdMachineTemplateNameToObjRef { +// vcdMachineTemplate, err := getVCDMachineTemplateByObjRef(ctx, cli, objRef) +// if err != nil { +// return nil, fmt.Errorf("failed to get VCDMachineTemplate by ObjectReference [%v]: [%v]", objRef, err) +// } +// vcdMachineTemplates = append(vcdMachineTemplates, vcdMachineTemplate) +// } +// +// kubeadmConfigTemplates := make([]*v1beta1.KubeadmConfigTemplate, 0) +// for _, objRef := range kubeadmConfigTemplateNameToObjRef { +// kubeadmConifgTemplate, err := getKubeadmConfigTemplateByObjRef(ctx, cli, *objRef) +// if err != nil { +// return nil, fmt.Errorf("failed to get KubeadmConfigTemplate by ObjectReference [%v]: [%v]", objRef, err) +// } +// kubeadmConfigTemplates = append(kubeadmConfigTemplates, kubeadmConifgTemplate) +// } +// +// // add objects +// for _, vcdMachineTemplate := range vcdMachineTemplates { +// capiYamlObjects = append(capiYamlObjects, *vcdMachineTemplate) +// } +// for _, kubeadmConfigTemplate := range kubeadmConfigTemplates { +// capiYamlObjects = append(capiYamlObjects, *kubeadmConfigTemplate) +// } +// for _, kcp := range kcpList.Items { +// capiYamlObjects = append(capiYamlObjects, kcp) +// } +// for _, md := range mdList.Items { +// capiYamlObjects = append(capiYamlObjects, md) +// } +// return capiYamlObjects, nil +// } + +// func getCapiYaml(ctx context.Context, cli client.Client, cluster clusterv1.Cluster, vcdCluster infrav1.VCDCluster) (string, error) { +// capiYamlObjects, err := getK8sClusterObjects(ctx, cli, cluster, vcdCluster) +// if err != nil { +// return "", fmt.Errorf("failed to get k8s objects related to cluster [%s]: [%v]", cluster.Name, err) +// } +// yamlObjects := make([]string, len(capiYamlObjects)) +// for idx, obj := range capiYamlObjects { +// yamlString, err := yamlWithoutStatus(obj) +// if err != nil { +// return "", fmt.Errorf("failed to convert object to yaml: [%v]", err) +// } +// yamlObjects[idx] = yamlString +// } +// +// return strings.Join(yamlObjects, "---\n"), nil +// +// } + +// func getCapiStatusYaml(ctx context.Context, cli client.Client, cluster clusterv1.Cluster, vcdCluster infrav1.VCDCluster) (string, error) { 
+// capiYamlObjects, err := getK8sClusterObjects(ctx, cli, cluster, vcdCluster) +// if err != nil { +// return "", fmt.Errorf("failed to get k8s objects related to cluster [%s]: [%v]", cluster.Name, err) +// } +// yamlObjects := make([]string, len(capiYamlObjects)) +// for idx, obj := range capiYamlObjects { +// yamlStatusString, err := getK8sObjectStatus(obj) +// if err != nil { +// return "", fmt.Errorf("failed to extract status from kuberenets object: [%v]", err) +// } +// yamlObjects[idx] = yamlStatusString +// } +// return strings.Join(yamlObjects, "---\n"), nil +// } func getUserCredentialsForCluster(ctx context.Context, cli client.Client, definedCreds infrav1.UserCredentialsContext) (infrav1.UserCredentialsContext, error) { username, password, refreshToken := definedCreds.Username, definedCreds.Password, definedCreds.RefreshToken @@ -632,31 +632,31 @@ func getUserCredentialsForCluster(ctx context.Context, cli client.Client, define // hasClusterReconciledToDesiredK8Version returns true if all the kubeadm control plane objects and machine deployments have // reconciled to the desired kubernetes version, else returns false. -func hasClusterReconciledToDesiredK8Version(ctx context.Context, cli client.Client, clusterName string, - kcpList *kcpv1.KubeadmControlPlaneList, mdList *clusterv1.MachineDeploymentList, expectedVersion string) (bool, error) { - - for _, kcp := range kcpList.Items { - machines, err := getAllMachinesInKCP(ctx, cli, kcp, clusterName) - if err != nil { - return false, fmt.Errorf("failed to fetch machines for the kubeadm control plane object [%s] for cluster [%s]: [%v]", kcp.Name, clusterName, err) - } - for _, machine := range machines { - if machine.Spec.Version != nil && *machine.Spec.Version != expectedVersion { - return false, nil - } - } - } - - for _, md := range mdList.Items { - machineList, err := getAllMachinesInMachineDeployment(ctx, cli, md) - if err != nil { - return false, fmt.Errorf("failed to fetch machines for the machine deployment [%s] for cluster [%s]: [%v]", md.Name, clusterName, err) - } - for _, machine := range machineList.Items { - if machine.Spec.Version != nil && *machine.Spec.Version != expectedVersion { - return false, nil - } - } - } - return true, nil -} +// func hasClusterReconciledToDesiredK8Version(ctx context.Context, cli client.Client, clusterName string, +// kcpList *kcpv1.KubeadmControlPlaneList, mdList *clusterv1.MachineDeploymentList, expectedVersion string) (bool, error) { +// +// for _, kcp := range kcpList.Items { +// machines, err := getAllMachinesInKCP(ctx, cli, kcp, clusterName) +// if err != nil { +// return false, fmt.Errorf("failed to fetch machines for the kubeadm control plane object [%s] for cluster [%s]: [%v]", kcp.Name, clusterName, err) +// } +// for _, machine := range machines { +// if machine.Spec.Version != nil && *machine.Spec.Version != expectedVersion { +// return false, nil +// } +// } +// } +// +// for _, md := range mdList.Items { +// machineList, err := getAllMachinesInMachineDeployment(ctx, cli, md) +// if err != nil { +// return false, fmt.Errorf("failed to fetch machines for the machine deployment [%s] for cluster [%s]: [%v]", md.Name, clusterName, err) +// } +// for _, machine := range machineList.Items { +// if machine.Spec.Version != nil && *machine.Spec.Version != expectedVersion { +// return false, nil +// } +// } +// } +// return true, nil +// } diff --git a/controllers/cluster_scripts/cloud_init.tmpl b/controllers/cluster_scripts/cloud_init.tmpl index a0b49271..3e8b7741 100644 --- 
a/controllers/cluster_scripts/cloud_init.tmpl +++ b/controllers/cluster_scripts/cloud_init.tmpl @@ -3,19 +3,6 @@ users: - name: root lock_passwd: false write_files: -- path: /etc/cloud/cloud.cfg.d/cse.cfg - owner: root - content: | - ssh_deletekeys: false -- path: /opt/vmware/cloud-director/metering.sh - owner: root - content: | - #!/usr/bin/env bash - vmtoolsd --cmd "info-set guestinfo.metering.vcd_site_id $VCD_SITE_ID" - vmtoolsd --cmd "info-set guestinfo.metering.cluster_id $CLUSTER_ID" - vmtoolsd --cmd "info-set guestinfo.metering.tkg_version $TKG_VERSION" - vmtoolsd --cmd "info-set guestinfo.metering.machine_type $MACHINE_TYPE" - vmtoolsd --cmd "info-set guestinfo.metering.mgmt $MGMT" - path: /etc/vcloud/metering owner: root content: | @@ -24,143 +11,8 @@ write_files: TKG_VERSION={{ .TKGVersion }} MACHINE_TYPE={{- if or .ControlPlane .ResizedControlPlane -}} control_plane {{- else -}} worker {{- end }} MGMT=true -- path: /etc/systemd/system/metering.service - owner: root - content: | - [Service] - Type=simple - EnvironmentFile=/etc/vcloud/metering - ExecStart=/bin/bash /opt/vmware/cloud-director/metering.sh - - [Install] - WantedBy=multi-user.target -- path: /root/ {{- if .ControlPlane -}} control_plane {{- else -}} node {{- end -}} .sh - owner: root - content: | - #!/usr/bin/env bash - catch() { - vmtoolsd --cmd "info-set guestinfo.post_customization_script_execution_status $?" - ERROR_MESSAGE="$(date) $(caller): $BASH_COMMAND" - echo "$ERROR_MESSAGE" &>> /var/log/capvcd/customization/error.log - if [[ -s /root/kubeadm.err ]] - then - KUBEADM_FAILURE=$(cat /root/kubeadm.err) - ERROR_MESSAGE="$ERROR_MESSAGE $KUBEADM_FAILURE" - fi - vmtoolsd --cmd "info-set guestinfo.post_customization_script_execution_failure_reason $ERROR_MESSAGE" - - CLOUD_INIT_OUTPUT="" - if [[ -f /var/log/cloud-init-output.log ]] - then - CLOUD_INIT_OUTPUT=$(> /var/log/capvcd/customization/status.log {{- if .ControlPlane }} - - VCLOUD_BASIC_AUTH_PATH=/root/vcloud-basic-auth.yaml - VCLOUD_CONFIGMAP_PATH=/root/vcloud-configmap.yaml - VCLOUD_CCM_PATH=/root/cloud-director-ccm.yaml - VCLOUD_CSI_CONFIGMAP_PATH=/root/vcloud-csi-configmap.yaml - CSI_DRIVER_PATH=/root/csi-driver.yaml - CSI_CONTROLLER_PATH=/root/csi-controller.yaml - CSI_NODE_PATH=/root/csi-node.yaml {{- end }} - - vmtoolsd --cmd "info-set guestinfo.postcustomization.networkconfiguration.status in_progress" - echo 'net.ipv6.conf.all.disable_ipv6 = 1' >> /etc/sysctl.conf - echo 'net.ipv6.conf.default.disable_ipv6 = 1' >> /etc/sysctl.conf - echo 'net.ipv6.conf.lo.disable_ipv6 = 1' >> /etc/sysctl.conf - sudo sysctl -p - # also remove ipv6 localhost entry from /etc/hosts - sed -i 's/::1/127.0.0.1/g' /etc/hosts || true - vmtoolsd --cmd "info-set guestinfo.postcustomization.networkconfiguration.status successful" - - vmtoolsd --cmd "info-set guestinfo.metering.status in_progress" - systemctl enable --now metering - vmtoolsd --cmd "info-set guestinfo.metering.status successful" {{- if or .HTTPProxy .HTTPSProxy }} - - vmtoolsd --cmd "info-set guestinfo.postcustomization.proxy.setting.status in_progress" - export HTTP_PROXY="{{.HTTPProxy}}" - export HTTPS_PROXY="{{.HTTPSProxy}}" - export http_proxy="{{.HTTPProxy}}" - export https_proxy="{{.HTTPSProxy}}" - export NO_PROXY="{{.NoProxy}}" - export no_proxy="{{.NoProxy}}" - cat <<END > /etc/systemd/system/containerd.service.d/http-proxy.conf - [Service] - Environment="HTTP_PROXY={{.HTTPProxy}}" - Environment="HTTPS_PROXY={{.HTTPSProxy}}" - Environment="http_proxy={{.HTTPProxy}}" - Environment="https_proxy={{.HTTPSProxy}}" - 
Environment="no_proxy={{.NoProxy}}" - Environment="NO_PROXY={{.NoProxy}}" - END - systemctl daemon-reload - systemctl restart containerd - wait_for_containerd_startup - vmtoolsd --cmd "info-set guestinfo.postcustomization.proxy.setting.status successful" {{- end }} - - vmtoolsd --cmd "info-set {{ if .ControlPlane -}} guestinfo.postcustomization.kubeinit.status {{- else -}} guestinfo.postcustomization.kubeadm.node.join.status {{- end }} in_progress" - for IMAGE in "coredns" "etcd" "kube-proxy" "kube-apiserver" "kube-controller-manager" "kube-scheduler" - do - IMAGE_REF=$(ctr -n=k8s.io image list | cut -d" " -f1 | grep $IMAGE) - REF_PATH=$(echo $IMAGE_REF | sed 's/:.*//') - NEW_TAG_VERSION=$(echo $IMAGE_REF | sed 's/.*://' | sed 's/_/-/') - ctr -n=k8s.io image tag $IMAGE_REF $REF_PATH:$NEW_TAG_VERSION - done - set +x - { - {{ .BootstrapRunCmd }} - } 2> /root/kubeadm.err - set -x - if [[ ! -f /run/cluster-api/bootstrap-success.complete ]] - then - echo "file /run/cluster-api/bootstrap-success.complete not found" &>> /var/log/capvcd/customization/error.log - exit 1 - fi - vmtoolsd --cmd "info-set {{ if .ControlPlane -}} guestinfo.postcustomization.kubeinit.status {{- else -}} guestinfo.postcustomization.kubeadm.node.join.status {{- end }} successful" - - echo "$(date) post customization script execution completed" &>> /var/log/capvcd/customization/status.log - exit 0 -runcmd: -- 'cloud-init clean' -- '[ ! -f /opt/vmware/cloud-director/metering.sh ] && sudo reboot' -- '[ ! -f /etc/cloud/cloud.cfg.d/cse.cfg ] && sudo reboot' -- '[ ! -f /etc/vcloud/metering ] && sudo reboot' -{{ if .ControlPlane }} -- '[ ! -f /root/control_plane.sh ] && sudo reboot' -- '[ ! -f /run/kubeadm/kubeadm.yaml ] && sudo reboot' -- bash /root/control_plane.sh -{{ else }} -- '[ ! -f /root/node.sh ] && sudo reboot' -- '[ ! -f /run/kubeadm/kubeadm-join-config.yaml ] && sudo reboot' -- bash /root/node.sh -{{ end }} timezone: UTC disable_root: false preserve_hostname: false hostname: "{{ .MachineName }}" -final_message: "The system is ready after $UPTIME seconds" +final_message: "The system is ready after $UPTIME seconds" \ No newline at end of file diff --git a/controllers/cluster_scripts/cloud_init_network_configuration.tmpl b/controllers/cluster_scripts/cloud_init_network_configuration.tmpl new file mode 100644 index 00000000..d67f4a62 --- /dev/null +++ b/controllers/cluster_scripts/cloud_init_network_configuration.tmpl @@ -0,0 +1,25 @@ +network: + version: 1 + config: + {{- range $section := . 
}} + - type: physical + name: {{$section.Network}} + mac_address: {{$section.MACAddress}} + subnets: + - type: static + address: {{$section.IPAddress}}/{{$section.NetmaskCidr}} + dns_nameservers: + {{- if $section.DNS1 }} + - {{ $section.DNS1 }} + {{- end -}} + {{- if $section.DNS2 }} + - {{ $section.DNS2 }} + {{- end -}} + {{- if eq $section.Primary true }} + gateway: {{ $section.Gateway }} + routes: + - gateway: {{$section.Gateway}} + destination: "0.0.0.0" + netmask: "0" + {{- end }} + {{- end -}} \ No newline at end of file diff --git a/controllers/vcdcluster_controller.go b/controllers/vcdcluster_controller.go index a9f172ba..f5c426eb 100644 --- a/controllers/vcdcluster_controller.go +++ b/controllers/vcdcluster_controller.go @@ -31,9 +31,9 @@ import ( "github.com/vmware/go-vcloud-director/v2/types/v56" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/klog" + // "k8s.io/klog" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - kcpv1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" + // kcpv1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/conditions" kcfg "sigs.k8s.io/cluster-api/util/kubeconfig" @@ -87,10 +87,8 @@ type VCDClusterReconciler struct { //+kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=vcdmachines,verbs=get;list;watch;create;update;patch;delete //+kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=vcdmachines/status,verbs=get;update;patch //+kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=vcdmachines/finalizers,verbs=update -//+kubebuilder:rbac:groups=controlplane.cluster.x-k8s.io,resources=kubeadmcontrolplanes,verbs=get;list;watch;create;update;patch;delete //+kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=vcdmachinetemplates,verbs=get;list;watch;create;update;patch;delete //+kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machinedeployments,verbs=get;list;watch;create;update;patch;delete -//+kubebuilder:rbac:groups=bootstrap.cluster.x-k8s.io,resources=kubeadmconfigtemplates,verbs=get;list;watch;create;update;patch;delete func (r *VCDClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, rerr error) { log := ctrl.LoggerFrom(ctx) @@ -300,26 +298,26 @@ func (r *VCDClusterReconciler) constructCapvcdRDE(ctx context.Context, cluster * if vcdOrg == nil { return nil, fmt.Errorf("org cannot be nil") } - kcpList, err := getAllKubeadmControlPlaneForCluster(ctx, r.Client, *cluster) - if err != nil { - return nil, fmt.Errorf("error getting KubeadmControlPlane objects for cluster [%s]: [%v]", vcdCluster.Name, err) - } + // kcpList, err := getAllKubeadmControlPlaneForCluster(ctx, r.Client, *cluster) + // if err != nil { + // return nil, fmt.Errorf("error getting KubeadmControlPlane objects for cluster [%s]: [%v]", vcdCluster.Name, err) + // } // we assume that there is only one kcp object for a cluster. 
// TODO: need to update the logic for multiple kcp objects in the cluster kubernetesVersion := "" - for _, kcp := range kcpList.Items { - kubernetesVersion = kcp.Spec.Version - } - - mdList, err := getAllMachineDeploymentsForCluster(ctx, r.Client, *cluster) - if err != nil { - return nil, fmt.Errorf("error getting all machine deployment objects for the cluster [%s]: [%v]", vcdCluster.Name, err) - } - ready, err := hasClusterReconciledToDesiredK8Version(ctx, r.Client, vcdCluster.Name, kcpList, mdList, kubernetesVersion) - if err != nil { - return nil, fmt.Errorf("error occurred while determining the value for the ready flag for cluster [%s]: [%v]", vcdCluster.Name, err) - } + // for _, kcp := range kcpList.Items { + // kubernetesVersion = kcp.Spec.Version + // } + // + // mdList, err := getAllMachineDeploymentsForCluster(ctx, r.Client, *cluster) + // if err != nil { + // return nil, fmt.Errorf("error getting all machine deployment objects for the cluster [%s]: [%v]", vcdCluster.Name, err) + // } + // ready, err := hasClusterReconciledToDesiredK8Version(ctx, r.Client, vcdCluster.Name, kcpList, mdList, kubernetesVersion) + // if err != nil { + // return nil, fmt.Errorf("error occurred while determining the value for the ready flag for cluster [%s]: [%v]", vcdCluster.Name, err) + // } orgList := []rdeType.Org{ rdeType.Org{ @@ -384,7 +382,8 @@ func (r *VCDClusterReconciler) constructCapvcdRDE(ctx context.Context, cluster * TkgVersion: getTKGVersion(cluster), }, Previous: nil, - Ready: ready, + // Ready: ready, + Ready: true, }, }, }, @@ -443,7 +442,7 @@ func (r *VCDClusterReconciler) reconcileRDE(ctx context.Context, cluster *cluste return fmt.Errorf("found nil org when getting org by name [%s]", vcdCluster.Spec.Org) } capvcdRdeManager := capisdk.NewCapvcdRdeManager(workloadVCDClient, vcdCluster.Status.InfraId) - _, capvcdSpec, capvcdMetadata, capvcdStatus, err := capvcdRdeManager.GetCAPVCDEntity(ctx, vcdCluster.Status.InfraId) + _, _, capvcdMetadata, capvcdStatus, err := capvcdRdeManager.GetCAPVCDEntity(ctx, vcdCluster.Status.InfraId) if err != nil { return fmt.Errorf("failed to get RDE with ID [%s] for cluster [%s]: [%v]", vcdCluster.Status.InfraId, vcdCluster.Name, err) } @@ -464,25 +463,25 @@ func (r *VCDClusterReconciler) reconcileRDE(ctx context.Context, cluster *cluste } specPatch := make(map[string]interface{}) - kcpList, err := getAllKubeadmControlPlaneForCluster(ctx, r.Client, *cluster) - if err != nil { - return fmt.Errorf("error getting all KubeadmControlPlane objects for cluster [%s]: [%v]", vcdCluster.Name, err) - } - - mdList, err := getAllMachineDeploymentsForCluster(ctx, r.Client, *cluster) - if err != nil { - return fmt.Errorf("error getting all MachineDeployment objects for cluster [%s]: [%v]", vcdCluster.Name, err) - } - - kubernetesSpecVersion := "" - var kcpObj *kcpv1.KubeadmControlPlane - // we assume that there is only one kcp object for a cluster. 
- // TODO: need to update the logic for multiple kcp objects in the cluster - if len(kcpList.Items) > 0 { - kcpObj = &kcpList.Items[0] - kubernetesSpecVersion = kcpObj.Spec.Version // for RDE updates, consider only the first kcp object - } - tkgVersion := getTKGVersion(cluster) + // kcpList, err := getAllKubeadmControlPlaneForCluster(ctx, r.Client, *cluster) + // if err != nil { + // return fmt.Errorf("error getting all KubeadmControlPlane objects for cluster [%s]: [%v]", vcdCluster.Name, err) + // } + // + // mdList, err := getAllMachineDeploymentsForCluster(ctx, r.Client, *cluster) + // if err != nil { + // return fmt.Errorf("error getting all MachineDeployment objects for cluster [%s]: [%v]", vcdCluster.Name, err) + // } + // + // kubernetesSpecVersion := "" + // var kcpObj *kcpv1.KubeadmControlPlane + // // we assume that there is only one kcp object for a cluster. + // // TODO: need to update the logic for multiple kcp objects in the cluster + // if len(kcpList.Items) > 0 { + // kcpObj = &kcpList.Items[0] + // kubernetesSpecVersion = kcpObj.Spec.Version // for RDE updates, consider only the first kcp object + // } + // tkgVersion := getTKGVersion(cluster) crsBindingList, err := getAllCRSBindingForCluster(ctx, r.Client, *cluster) if err != nil { // this is fundamentally not a mandatory field @@ -490,15 +489,15 @@ func (r *VCDClusterReconciler) reconcileRDE(ctx context.Context, cluster *cluste } // UI can create CAPVCD clusters in future which can populate capiYaml in RDE.Spec, so we only want to populate if capiYaml is empty - if capvcdSpec.CapiYaml == "" { - capiYaml, err := getCapiYaml(ctx, r.Client, *cluster, *vcdCluster) - if err != nil { - log.Error(err, - "error during RDE reconciliation: failed to construct capi yaml from kubernetes resources of cluster") - } else { - specPatch["CapiYaml"] = capiYaml - } - } + // if capvcdSpec.CapiYaml == "" { + // capiYaml, err := getCapiYaml(ctx, r.Client, *cluster, *vcdCluster) + // if err != nil { + // log.Error(err, + // "error during RDE reconciliation: failed to construct capi yaml from kubernetes resources of cluster") + // } else { + // specPatch["CapiYaml"] = capiYaml + // } + // } // Updating status portion of the RDE in the following code capvcdStatusPatch := make(map[string]interface{}) @@ -507,35 +506,35 @@ func (r *VCDClusterReconciler) reconcileRDE(ctx context.Context, cluster *cluste } upgradeObject := capvcdStatus.Upgrade - var ready bool - ready, err = hasClusterReconciledToDesiredK8Version(ctx, r.Client, vcdCluster.Name, kcpList, mdList, kubernetesSpecVersion) - if err != nil { - return fmt.Errorf("failed to determine the value for ready flag for upgrades for cluster [%s(%s)]: [%v]", - vcdCluster.Name, vcdCluster.Status.InfraId, err) - } - - if kcpObj != nil { - if upgradeObject.Current == nil { - upgradeObject = rdeType.Upgrade{ - Current: &rdeType.K8sInfo{ - K8sVersion: kubernetesSpecVersion, - TkgVersion: tkgVersion, - }, - Previous: nil, - Ready: ready, - } - } else { - if kcpObj.Spec.Version != capvcdStatus.Upgrade.Current.K8sVersion { - - upgradeObject.Previous = upgradeObject.Current - upgradeObject.Current = &rdeType.K8sInfo{ - K8sVersion: kubernetesSpecVersion, - TkgVersion: tkgVersion, - } - } - upgradeObject.Ready = ready - } - } + // var ready bool + // ready, err = hasClusterReconciledToDesiredK8Version(ctx, r.Client, vcdCluster.Name, kcpList, mdList, kubernetesSpecVersion) + // if err != nil { + // return fmt.Errorf("failed to determine the value for ready flag for upgrades for cluster [%s(%s)]: [%v]", + // 
vcdCluster.Name, vcdCluster.Status.InfraId, err) + // } + // + // if kcpObj != nil { + // if upgradeObject.Current == nil { + // upgradeObject = rdeType.Upgrade{ + // Current: &rdeType.K8sInfo{ + // K8sVersion: kubernetesSpecVersion, + // TkgVersion: tkgVersion, + // }, + // Previous: nil, + // Ready: ready, + // } + // } else { + // if kcpObj.Spec.Version != capvcdStatus.Upgrade.Current.K8sVersion { + // + // upgradeObject.Previous = upgradeObject.Current + // upgradeObject.Current = &rdeType.K8sInfo{ + // K8sVersion: kubernetesSpecVersion, + // TkgVersion: tkgVersion, + // } + // } + // upgradeObject.Ready = ready + // } + // } log.V(4).Info("upgrade section of the RDE", "previous", capvcdStatus.Upgrade, "current", upgradeObject) @@ -544,9 +543,9 @@ func (r *VCDClusterReconciler) reconcileRDE(ctx context.Context, cluster *cluste } // TODO: Delete "kubernetes" string in RDE. Discuss with Sahithi - if capvcdStatus.Kubernetes != kubernetesSpecVersion { - capvcdStatusPatch["Kubernetes"] = kubernetesSpecVersion - } + // if capvcdStatus.Kubernetes != kubernetesSpecVersion { + // capvcdStatusPatch["Kubernetes"] = kubernetesSpecVersion + // } if capvcdStatus.Uid != vcdCluster.Status.InfraId { capvcdStatusPatch["Uid"] = vcdCluster.Status.InfraId @@ -561,13 +560,13 @@ func (r *VCDClusterReconciler) reconcileRDE(ctx context.Context, cluster *cluste capvcdStatusPatch["UseAsManagementCluster"] = vcdCluster.Status.UseAsManagementCluster } // fill CAPIStatusYaml - capiStatusYaml, err := getCapiStatusYaml(ctx, r.Client, *cluster, *vcdCluster) - if err != nil { - log.Error(err, "failed to populate capiStatusYaml in RDE", "rdeID", vcdCluster.Status.InfraId) - } - if capvcdStatus.CapiStatusYaml != capiStatusYaml { - capvcdStatusPatch["CapiStatusYaml"] = capiStatusYaml - } + // capiStatusYaml, err := getCapiStatusYaml(ctx, r.Client, *cluster, *vcdCluster) + // if err != nil { + // log.Error(err, "failed to populate capiStatusYaml in RDE", "rdeID", vcdCluster.Status.InfraId) + // } + // if capvcdStatus.CapiStatusYaml != capiStatusYaml { + // capvcdStatusPatch["CapiStatusYaml"] = capiStatusYaml + // } pods := rdeType.Pods{ CidrBlocks: cluster.Spec.ClusterNetwork.Pods.CIDRBlocks, @@ -628,13 +627,13 @@ func (r *VCDClusterReconciler) reconcileRDE(ctx context.Context, cluster *cluste } // update node status. 
Needed to remove stray nodes which were already deleted - nodePoolList, err := getNodePoolList(ctx, r.Client, *cluster) - if err != nil { - klog.Errorf("failed to get node pool list from cluster [%s]: [%v]", cluster.Name, err) - } - if !reflect.DeepEqual(nodePoolList, capvcdStatus.NodePool) { - capvcdStatusPatch["NodePool"] = nodePoolList - } + // nodePoolList, err := getNodePoolList(ctx, r.Client, *cluster) + // if err != nil { + // klog.Errorf("failed to get node pool list from cluster [%s]: [%v]", cluster.Name, err) + // } + // if !reflect.DeepEqual(nodePoolList, capvcdStatus.NodePool) { + // capvcdStatusPatch["NodePool"] = nodePoolList + // } ovdcList := []rdeType.Ovdc{ rdeType.Ovdc{ diff --git a/controllers/vcdmachine_controller.go b/controllers/vcdmachine_controller.go index 944086aa..b3e30ef0 100644 --- a/controllers/vcdmachine_controller.go +++ b/controllers/vcdmachine_controller.go @@ -12,6 +12,7 @@ import ( b64 "encoding/base64" "fmt" "math" + "net" "reflect" "strconv" "strings" @@ -64,6 +65,17 @@ type CloudInitScriptInput struct { ClusterID string // cluster id } +type IgnitionNetworkInitScriptSectionInput struct { + Primary bool + Network string + IPAddress string + MACAddress string + NetmaskCidr int + Gateway string + DNS1 string + DNS2 string +} + const ( ReclaimPolicyDelete = "Delete" ReclaimPolicyRetain = "Retain" @@ -79,6 +91,9 @@ const Mebibyte = 1048576 //go:embed cluster_scripts/cloud_init.tmpl var cloudInitScriptTemplate string +//go:embed cluster_scripts/cloud_init_network_configuration.tmpl +var CloudInitNetworkInitScriptTemplate string + // VCDMachineReconciler reconciles a VCDMachine object type VCDMachineReconciler struct { client.Client @@ -206,11 +221,11 @@ func patchVCDMachine(ctx context.Context, patchHelper *patch.Helper, vcdMachine } const ( - NetworkConfiguration = "guestinfo.postcustomization.networkconfiguration.status" - ProxyConfiguration = "guestinfo.postcustomization.proxy.setting.status" - MeteringConfiguration = "guestinfo.metering.status" - KubeadmInit = "guestinfo.postcustomization.kubeinit.status" - KubeadmNodeJoin = "guestinfo.postcustomization.kubeadm.node.join.status" + NetworkConfiguration = "guestinfo.postcustomization.networkconfiguration.status" + ProxyConfiguration = "guestinfo.postcustomization.proxy.setting.status" + MeteringConfiguration = "guestinfo.metering.status" + // KubeadmInit = "guestinfo.postcustomization.kubeinit.status" + // KubeadmNodeJoin = "guestinfo.postcustomization.kubeadm.node.join.status" PostCustomizationScriptExecutionStatus = "guestinfo.post_customization_script_execution_status" PostCustomizationScriptFailureReason = "guestinfo.post_customization_script_execution_failure_reason" ) @@ -474,12 +489,11 @@ func (r *VCDMachineReconciler) reconcileNormal(ctx context.Context, cluster *clu // run `kubeadm join`. The joining control planes run `kubeadm join`, so these nodes use the join script. // Although it is sufficient to just check if `kubeadm join` is in the bootstrap script, using the // isControlPlaneMachine function is a simpler operation, so this function is called first. 
- useControlPlaneScript := util.IsControlPlaneMachine(machine) && - !strings.Contains(bootstrapJinjaScript, "kubeadm join") + useControlPlaneScript := util.IsControlPlaneMachine(machine) // && !strings.Contains(bootstrapJinjaScript, "kubeadm join") // Scaling up Control Plane initially creates the nodes as worker, which eventually joins the original control plane // Hence we are checking if it contains the control plane label and has kubeadm join in the script - isResizedControlPlane := util.IsControlPlaneMachine(machine) && strings.Contains(bootstrapJinjaScript, "kubeadm join") + isResizedControlPlane := util.IsControlPlaneMachine(machine) // && strings.Contains(bootstrapJinjaScript, "kubeadm join") // Construct a CloudInitScriptInput struct to pass into template.Execute() function to generate the necessary // cloud init script for the relevant node type, i.e. control plane or worker node @@ -753,10 +767,13 @@ func (r *VCDMachineReconciler) reconcileNormal(ctx context.Context, cluster *clu if vmStatus != "POWERED_ON" { // try to power on the VM b64CloudInitScript := b64.StdEncoding.EncodeToString(mergedCloudInitBytes) + b64NetworkMetadata, err := generateNetworkInitializationScriptForCloudInitB64(vm.VM.NetworkConnectionSection, vdcManager) keyVals := map[string]string{ "guestinfo.userdata": b64CloudInitScript, "guestinfo.userdata.encoding": "base64", "disk.enableUUID": "1", + "guestinfo.metadata": b64NetworkMetadata, + "guestinfo.metadata.encoding": "base64", } for key, val := range keyVals { @@ -834,40 +851,40 @@ func (r *VCDMachineReconciler) reconcileNormal(ctx context.Context, cluster *clu log.Error(err, "failed to remove VCDMachineCreationError from RDE", "rdeID", vcdCluster.Status.InfraId) } - phases := postCustPhases - if useControlPlaneScript { - phases = append(phases, KubeadmInit) - } else { - phases = append(phases, KubeadmNodeJoin) - } - - if vcdCluster.Spec.ProxyConfigSpec.HTTPSProxy == "" && - vcdCluster.Spec.ProxyConfigSpec.HTTPProxy == "" { - phases = removeFromSlice(ProxyConfiguration, phases) - } - - for _, phase := range phases { - if err = vApp.Refresh(); err != nil { - err1 := capvcdRdeManager.AddToErrorSet(ctx, capisdk.VCDMachineScriptExecutionError, "", machine.Name, fmt.Sprintf("%v", err)) - if err1 != nil { - log.Error(err1, "failed to add VCDMachineScriptExecutionError into RDE", "rdeID", vcdCluster.Status.InfraId) - } - return ctrl.Result{}, - errors.Wrapf(err, "Error while bootstrapping the machine [%s/%s]; unable to refresh vapp", - vAppName, vm.VM.Name) - } - log.Info(fmt.Sprintf("Start: waiting for the bootstrapping phase [%s] to complete", phase)) - if err = r.waitForPostCustomizationPhase(ctx, workloadVCDClient, vm, phase); err != nil { - log.Error(err, fmt.Sprintf("Error waiting for the bootstrapping phase [%s] to complete", phase)) - err1 := capvcdRdeManager.AddToErrorSet(ctx, capisdk.VCDMachineScriptExecutionError, "", machine.Name, fmt.Sprintf("%v", err)) - if err1 != nil { - log.Error(err1, "failed to add VCDMachineScriptExecutionError into RDE", "rdeID", vcdCluster.Status.InfraId) - } - return ctrl.Result{}, errors.Wrapf(err, "Error while bootstrapping the machine [%s/%s]; unable to wait for post customization phase [%s]", - vAppName, vm.VM.Name, phase) - } - log.Info(fmt.Sprintf("End: waiting for the bootstrapping phase [%s] to complete", phase)) - } + // phases := postCustPhases + // // if useControlPlaneScript { + // // phases = append(phases, KubeadmInit) + // // } else { + // // phases = append(phases, KubeadmNodeJoin) + // // } + + // if 
vcdCluster.Spec.ProxyConfigSpec.HTTPSProxy == "" && + // vcdCluster.Spec.ProxyConfigSpec.HTTPProxy == "" { + // phases = removeFromSlice(ProxyConfiguration, phases) + // } + + // for _, phase := range phases { + // if err = vApp.Refresh(); err != nil { + // err1 := capvcdRdeManager.AddToErrorSet(ctx, capisdk.VCDMachineScriptExecutionError, "", machine.Name, fmt.Sprintf("%v", err)) + // if err1 != nil { + // log.Error(err1, "failed to add VCDMachineScriptExecutionError into RDE", "rdeID", vcdCluster.Status.InfraId) + // } + // return ctrl.Result{}, + // errors.Wrapf(err, "Error while bootstrapping the machine [%s/%s]; unable to refresh vapp", + // vAppName, vm.VM.Name) + // } + // log.Info(fmt.Sprintf("Start: waiting for the bootstrapping phase [%s] to complete", phase)) + // if err = r.waitForPostCustomizationPhase(ctx, workloadVCDClient, vm, phase); err != nil { + // log.Error(err, fmt.Sprintf("Error waiting for the bootstrapping phase [%s] to complete", phase)) + // err1 := capvcdRdeManager.AddToErrorSet(ctx, capisdk.VCDMachineScriptExecutionError, "", machine.Name, fmt.Sprintf("%v", err)) + // if err1 != nil { + // log.Error(err1, "failed to add VCDMachineScriptExecutionError into RDE", "rdeID", vcdCluster.Status.InfraId) + // } + // return ctrl.Result{}, errors.Wrapf(err, "Error while bootstrapping the machine [%s/%s]; unable to wait for post customization phase [%s]", + // vAppName, vm.VM.Name, phase) + // } + // log.Info(fmt.Sprintf("End: waiting for the bootstrapping phase [%s] to complete", phase)) + // } err = capvcdRdeManager.RdeManager.RemoveErrorByNameOrIdFromErrorSet(ctx, vcdsdk.ComponentCAPVCD, capisdk.VCDMachineScriptExecutionError, "", "") if err != nil { @@ -1559,6 +1576,7 @@ func MergeJinjaToCloudInitScript(cloudInitConfig CloudInitScriptInput, jinjaConf "preserve_hostname", "hostname", "final_message", + "ntp", } { val, ok := mergedCloudInit[key] if !ok { @@ -1578,3 +1596,46 @@ func MergeJinjaToCloudInitScript(cloudInitConfig CloudInitScriptInput, jinjaConf return out, nil } + +// generateNetworkInitializationScriptForCloudInitB64 creates the bash script that will create the networkd units stored in metadata +// and consumed by cloudinit +func generateNetworkInitializationScriptForCloudInitB64(networkConnection *types.NetworkConnectionSection, vdcManager *vcdsdk.VdcManager) (string, error) { + CloudInitNetworkInitTemplate, err := template.New("cloud_init_network_init_script_template").Parse(CloudInitNetworkInitScriptTemplate) + if err != nil { + return "", errors.Wrapf(err, "Error parsing CloudInitNetworkInitScriptTemplate [%s]", CloudInitNetworkInitScriptTemplate) + } + + var sectionInputConfigs []IgnitionNetworkInitScriptSectionInput + for _, network := range networkConnection.NetworkConnection { + // Process NIC network properties and subnet CIDR + orgVdcNetwork, err := vdcManager.Vdc.GetOrgVdcNetworkByName(network.Network, true) + if err != nil { + return "", err + } + + ipScope := orgVdcNetwork.OrgVDCNetwork.Configuration.IPScopes.IPScope[0] + netmask := net.ParseIP(ipScope.Netmask) + netmaskCidr, _ := net.IPMask(netmask.To4()).Size() + + sectionInputConfigs = append(sectionInputConfigs, IgnitionNetworkInitScriptSectionInput{ + Primary: network.NetworkConnectionIndex == networkConnection.PrimaryNetworkConnectionIndex, + Network: network.Network, + IPAddress: network.IPAddress, + MACAddress: network.MACAddress, + NetmaskCidr: netmaskCidr, + Gateway: ipScope.Gateway, + DNS1: ipScope.DNS1, + DNS2: ipScope.DNS2, + }) + } + + buff := bytes.Buffer{} + if err = 
CloudInitNetworkInitTemplate.Execute(&buff, sectionInputConfigs); err != nil { + return "", errors.Wrapf(err, "Error rendering Cloud init network init template: [%s]", CloudInitNetworkInitTemplate.Name()) + } + + networkMetadata := buff.String() // Assuming this is the string you want to encode + encoded := b64.StdEncoding.EncodeToString([]byte(networkMetadata)) + + return encoded, nil +} diff --git a/main.go b/main.go index d6c71651..b5f8607b 100644 --- a/main.go +++ b/main.go @@ -24,7 +24,8 @@ import ( "k8s.io/client-go/kubernetes/scheme" "k8s.io/klog" clusterv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" - bootstrapv1beta1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" + + // bootstrapv1beta1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" addonsv1 "sigs.k8s.io/cluster-api/exp/addons/api/v1beta1" "sigs.k8s.io/cluster-api/util/flags" "sigs.k8s.io/controller-runtime/pkg/controller" @@ -36,7 +37,7 @@ import ( // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) // to ensure that exec-entrypoint and run can make use of them. _ "k8s.io/client-go/plugin/pkg/client/auth" - kcpv1beta1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" + // kcpv1beta1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/healthz" "sigs.k8s.io/controller-runtime/pkg/log/zap" @@ -48,6 +49,7 @@ var ( enableLeaderElection bool probeAddr string + profilerAddress string syncPeriod time.Duration concurrency int diagnosticsOptions flags.DiagnosticsOptions @@ -62,14 +64,15 @@ func init() { // core CAPI objects using v1beta1 using the available webhook conversion. Hence, v1beta1 support in core CAPI is // mandatory. utilruntime.Must(clusterv1beta1.AddToScheme(myscheme)) - utilruntime.Must(kcpv1beta1.AddToScheme(myscheme)) - utilruntime.Must(bootstrapv1beta1.AddToScheme(myscheme)) + // utilruntime.Must(kcpv1beta1.AddToScheme(myscheme)) + // utilruntime.Must(bootstrapv1beta1.AddToScheme(myscheme)) // We need the addonsv1 scheme in order to list the ClusterResourceSetBindings addon. utilruntime.Must(addonsv1.AddToScheme(myscheme)) } func initFlags(fs *pflag.FlagSet) { fs.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.") + fs.StringVar(&profilerAddress, "profiler-address", ":6060", "Bind address to expose the pprof profiler (e.g. localhost:6060)") fs.BoolVar(&enableLeaderElection, "leader-elect", false, "Enable leader election for controller manager. 
"+ "Enabling this will ensure there is only one active controller manager.") @@ -105,6 +108,7 @@ func main() { Scheme: myscheme, Metrics: flags.GetDiagnosticsOptions(diagnosticsOptions), HealthProbeBindAddress: probeAddr, + PprofBindAddress: profilerAddress, LeaderElection: enableLeaderElection, LeaderElectionID: "capvcd-controller-manager-leader-election", Cache: cache.Options{ diff --git a/tests/e2e/utils/utils.go b/tests/e2e/utils/utils.go index a35645ee..eb771da0 100644 --- a/tests/e2e/utils/utils.go +++ b/tests/e2e/utils/utils.go @@ -14,7 +14,7 @@ const ( VCDCluster = "VCDCluster" MachineDeployment = "MachineDeployment" Cluster = "Cluster" - KubeadmControlPlane = "KubeadmControlPlane" + // KubeadmControlPlane = "KubeadmControlPlane" VCDMachineTemplate = "VCDMachineTemplate" SECRET = "Secret" etcdServerRequestTimeoutErr = "etcdserver: request timed out" diff --git a/tilt-provider.json b/tilt-provider.json new file mode 100644 index 00000000..0d37a2fa --- /dev/null +++ b/tilt-provider.json @@ -0,0 +1,16 @@ +{ + "name": "capvcd", + "config": { + "image": "projects.packages.broadcom.com/vmware-cloud-director/cluster-api-provider-cloud-director", + "live_reload_deps": [ + "main.go", + "go.mod", + "go.sum", + "api", + "cloud", + "controllers", + "exp" + ], + "label": "CAPVCD" + } +} \ No newline at end of file