diff --git a/pkg/syncer/admissionhandler/cnscsi_admissionhandler.go b/pkg/syncer/admissionhandler/cnscsi_admissionhandler.go
index 12fcd686a2..18bac2885d 100644
--- a/pkg/syncer/admissionhandler/cnscsi_admissionhandler.go
+++ b/pkg/syncer/admissionhandler/cnscsi_admissionhandler.go
@@ -38,6 +38,8 @@ const (
 	DefaultWebhookPort               = 9883
 	DefaultWebhookMetricsBindAddress = "0"
 	devopsUserLabelKey               = "cns.vmware.com/user-created"
+	vmNameLabelKey                   = "cns.vmware.com/vm-name"
+	pvcNameLabelKey                  = "cns.vmware.com/pvc-name"
 )
 
 var (
@@ -268,11 +270,14 @@ func (h *CSISupervisorMutationWebhook) mutateNewCnsFileAccessConfig(ctx context.
 	if newCnsFileAccessConfig.Labels == nil {
 		newCnsFileAccessConfig.Labels = make(map[string]string)
 	}
-	if _, ok := newCnsFileAccessConfig.Labels[devopsUserLabelKey]; ok {
-		log.Debugf("Devops label already present on instance %s", newCnsFileAccessConfig.Name)
-		return admission.Allowed("")
-	}
+
+	// Add the VM name and PVC name labels.
+	// If this CR was created with these labels already present, CSI overwrites them
+	// with the correct values.
 	newCnsFileAccessConfig.Labels[devopsUserLabelKey] = "true"
+	newCnsFileAccessConfig.Labels[vmNameLabelKey] = newCnsFileAccessConfig.Spec.VMName
+	newCnsFileAccessConfig.Labels[pvcNameLabelKey] = newCnsFileAccessConfig.Spec.PvcName
+
 	newRawCnsFileAccessConfig, err := json.Marshal(newCnsFileAccessConfig)
 	if err != nil {
 		return admission.Errored(http.StatusInternalServerError, err)
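The mutation webhook above now stamps the VM-name and PVC-name labels unconditionally instead of returning early when the devops label is already present. A minimal, runnable sketch of that stamping step, using a hypothetical FileAccess type as a stand-in for the real CnsFileAccessConfig API struct:

// stamp_labels_sketch.go: not the driver's code, just an illustration of the mutation step.
package main

import (
	"encoding/json"
	"fmt"
)

// FileAccessSpec and FileAccess are hypothetical stand-ins for the CnsFileAccessConfig spec and object.
type FileAccessSpec struct {
	VMName  string `json:"vmName"`
	PvcName string `json:"pvcName"`
}

type FileAccess struct {
	Labels map[string]string `json:"labels,omitempty"`
	Spec   FileAccessSpec    `json:"spec"`
}

// stampLabels mirrors the webhook behavior: always (re)write the identifying labels from the spec.
func stampLabels(obj *FileAccess) {
	if obj.Labels == nil {
		obj.Labels = make(map[string]string)
	}
	obj.Labels["cns.vmware.com/user-created"] = "true"
	obj.Labels["cns.vmware.com/vm-name"] = obj.Spec.VMName
	obj.Labels["cns.vmware.com/pvc-name"] = obj.Spec.PvcName
}

func main() {
	obj := &FileAccess{Spec: FileAccessSpec{VMName: "my-vm", PvcName: "my-pvc"}}
	stampLabels(obj)
	out, _ := json.Marshal(obj)
	fmt.Println(string(out))
}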
diff --git a/pkg/syncer/admissionhandler/validate_cnsfileaccessconfig.go b/pkg/syncer/admissionhandler/validate_cnsfileaccessconfig.go
index ba65f7f364..92d62bb410 100644
--- a/pkg/syncer/admissionhandler/validate_cnsfileaccessconfig.go
+++ b/pkg/syncer/admissionhandler/validate_cnsfileaccessconfig.go
@@ -7,7 +7,9 @@ import (
 	"regexp"
 
 	admissionv1 "k8s.io/api/admission/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
 	cnsoperatorv1alpha1 "sigs.k8s.io/vsphere-csi-driver/v3/pkg/apis/cnsoperator"
@@ -42,6 +44,25 @@ func validateCreateCnsFileAccessConfig(ctx context.Context, clientConfig *rest.C
 		}
 	}
 
+	// This validation is not required for the pvCSI service account.
+	isPvCSIServiceAccount, err := validatePvCSIServiceAccount(req.UserInfo.Username)
+	if err != nil {
+		// Return an AdmissionResponse indicating failure.
+		return &admissionv1.AdmissionResponse{
+			Allowed: false,
+			Result: &metav1.Status{
+				Message: fmt.Sprintf("failed to validate user information: %s", err),
+			},
+		}
+	}
+
+	// If the user is the pvCSI service account, allow this request.
+	if isPvCSIServiceAccount {
+		return &admissionv1.AdmissionResponse{
+			Allowed: true,
+		}
+	}
+
 	vm := cnsFileAccessConfig.Spec.VMName
 	pvc := cnsFileAccessConfig.Spec.PvcName
 	namespace := cnsFileAccessConfig.Namespace
@@ -87,22 +108,31 @@ func cnsFileAccessConfigAlreadyExists(ctx context.Context, clientConfig *rest.Co
 		return "", err
 	}
 
+	// List only the CnsFileAccessConfig CRs that carry the same VM name and PVC name labels.
+	labelSelector := labels.SelectorFromSet(labels.Set{vmNameLabelKey: vm, pvcNameLabelKey: pvc})
 	// Get the list of all CnsFileAccessConfig CRs in the given namespace.
 	cnsFileAccessConfigList := &cnsfileaccessconfigv1alpha1.CnsFileAccessConfigList{}
-	err = cnsOperatorClient.List(ctx, cnsFileAccessConfigList, &client.ListOptions{Namespace: namespace})
+	err = cnsOperatorClient.List(ctx, cnsFileAccessConfigList, &client.ListOptions{
+		Namespace:     namespace,
+		LabelSelector: labelSelector,
+	})
 	if err != nil {
 		log.Errorf("failed to list CnsFileAccessConfigList CRs from %s namesapace. Error: %+v", namespace, err)
 		return "", err
 	}
 
-	for _, cnsFileAccessConfig := range cnsFileAccessConfigList.Items {
-		if cnsFileAccessConfig.Spec.VMName == vm {
-			if cnsFileAccessConfig.Spec.PvcName == pvc {
-				return cnsFileAccessConfig.Name, nil
-			}
-		}
+	if len(cnsFileAccessConfigList.Items) == 1 {
+		// There should be only one CnsFileAccessConfig CR with the same VM and PVC.
+		return cnsFileAccessConfigList.Items[0].Name, nil
+	}
+
+	if len(cnsFileAccessConfigList.Items) > 1 {
+		// We should never reach here, but it's good to have this check.
+		return "", fmt.Errorf("invalid case: %d CnsFileAccessConfig instances detected "+
+			"with the VM %s and PVC %s", len(cnsFileAccessConfigList.Items), vm, pvc)
 	}
+
 	return "", nil
 }
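The admission-time duplicate check above now scopes the List call with a label selector instead of iterating over every CR in the namespace. A self-contained sketch of the same pattern, using ConfigMaps and the controller-runtime fake client as stand-ins for the CnsFileAccessConfig types; the object name, namespace, and label values are illustrative:

// label_selector_sketch.go: demonstrates scoping a controller-runtime List with a label selector.
package main

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"
)

func main() {
	scheme := runtime.NewScheme()
	_ = v1.AddToScheme(scheme)

	// A ConfigMap stands in for a CnsFileAccessConfig CR carrying the new labels.
	cm := &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{
		Name:      "cfc-1",
		Namespace: "demo",
		Labels: map[string]string{
			"cns.vmware.com/vm-name":  "my-vm",
			"cns.vmware.com/pvc-name": "my-pvc",
		},
	}}
	c := fake.NewClientBuilder().WithScheme(scheme).WithObjects(cm).Build()

	// Build the selector the same way the webhook does and list only matching objects.
	selector := labels.SelectorFromSet(labels.Set{
		"cns.vmware.com/vm-name":  "my-vm",
		"cns.vmware.com/pvc-name": "my-pvc",
	})
	list := &v1.ConfigMapList{}
	err := c.List(context.TODO(), list, &client.ListOptions{Namespace: "demo", LabelSelector: selector})
	fmt.Println(len(list.Items), err) // expected: 1 <nil>
}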
diff --git a/pkg/syncer/cnsoperator/controller/cnsfileaccessconfig/cnsfileaccessconfig_controller.go b/pkg/syncer/cnsoperator/controller/cnsfileaccessconfig/cnsfileaccessconfig_controller.go
index 68956a7f1d..1fbef144cf 100644
--- a/pkg/syncer/cnsoperator/controller/cnsfileaccessconfig/cnsfileaccessconfig_controller.go
+++ b/pkg/syncer/cnsoperator/controller/cnsfileaccessconfig/cnsfileaccessconfig_controller.go
@@ -1,5 +1,5 @@
 /*
-Copyright 2021 The Kubernetes Authors.
+Copyright 2021-2025 The Kubernetes Authors.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -18,8 +18,10 @@ package cnsfileaccessconfig
 
 import (
 	"context"
+	"errors"
 	"fmt"
 	"reflect"
+	"strings"
 	"sync"
 	"time"
 
@@ -60,6 +62,11 @@ import (
 
 const (
 	defaultMaxWorkerThreadsForFileAccessConfig = 10
+	vmNameLabelKey                             = "cns.vmware.com/vm-name"
+	pvcNameLabelKey                            = "cns.vmware.com/pvc-name"
+	capvVmLabelKey                             = "capv.vmware.com"
+	capvPvcLabelKey                            = "TKGService"
+	devopsUserLabelKey                         = "cns.vmware.com/user-created"
 )
 
 // backOffDuration is a map of cnsfileaccessconfig name's to the time after
@@ -310,12 +317,25 @@ func (r *ReconcileCnsFileAccessConfig) Reconcile(ctx context.Context,
 	}
 	log.Debugf("Found virtualMachine instance for VM: %q/%q: %+v", instance.Namespace, instance.Spec.VMName, vm)
 
+	isFileVolumesWithVmServiceVmsSupported := commonco.ContainerOrchestratorUtility.IsFSSEnabled(ctx,
+		common.FileVolumesWithVmService)
+
+	if isFileVolumesWithVmServiceVmsSupported {
+		err = validateVmAndPvc(ctx, instance.Labels, instance.Name, instance.Spec.PvcName,
+			instance.Namespace, r.client, vm)
+		if err != nil {
+			log.Errorf("failed to validate VM %s and PVC %s. Error: %+v", vm.Name, instance.Spec.PvcName, err)
+			setInstanceError(ctx, r, instance, err.Error())
+			return reconcile.Result{RequeueAfter: timeout}, nil
+		}
+	}
+
 	if instance.DeletionTimestamp != nil {
 		log.Infof("CnsFileAccessConfig instance %q has deletion timestamp set", instance.Name)
 		volumeExists := true
 		volumeID, err := cnsoperatorutil.GetVolumeID(ctx, r.client, instance.Spec.PvcName, instance.Namespace)
 		if err != nil {
-			if commonco.ContainerOrchestratorUtility.IsFSSEnabled(ctx, common.FileVolumesWithVmService) &&
+			if isFileVolumesWithVmServiceVmsSupported &&
 				apierrors.IsNotFound(err) {
 				log.Infof("Volume ID for PVC %s in namespace %s not found. "+
 					"No need to configure ACL", instance.Spec.PvcName, instance.Namespace)
@@ -801,6 +821,66 @@ func (r *ReconcileCnsFileAccessConfig) getVMExternalIP(ctx context.Context,
 	return tkgVMIP, nil
 }
 
+// validateVmAndPvc validates whether the VM and PVC combination referenced by the instance is allowed.
+// CnsFileAccessConfig CRs created by devops users carry the "cns.vmware.com/user-created", "cns.vmware.com/vm-name"
+// and "cns.vmware.com/pvc-name" labels.
+// When these labels are present, the PVC and VM must not belong to a TKG cluster.
+// This is verified by ensuring that:
+// - the VM does not have a label applied by CAPV, for example capv.vmware.com/cluster.name, and
+// - the PVC does not have a TKG-applied label, for example a key containing "TKGService".
+func validateVmAndPvc(ctx context.Context, instanceLabels map[string]string, instanceName string, pvcName string,
+	namespace string, client client.Client, vm *vmoperatorv1alpha4.VirtualMachine) error {
+	log := logger.GetLogger(ctx)
+
+	if instanceLabels == nil {
+		log.Infof("No labels found on the instance %s. Nothing to validate.", instanceName)
+		return nil
+	}
+
+	isUserCreatedCnsFileAccessConfig := false
+	for key := range instanceLabels {
+		if key == devopsUserLabelKey {
+			isUserCreatedCnsFileAccessConfig = true
+			break
+		}
+	}
+
+	if !isUserCreatedCnsFileAccessConfig {
+		log.Infof("Instance %s is not created by a devops user. Nothing to validate.", instanceName)
+		return nil
+	}
+
+	for key := range vm.Labels {
+		if strings.Contains(key, capvVmLabelKey) {
+			msg := fmt.Sprintf("CnsFileAccessConfig is created by a devops user but refers to TKG VM %s. "+
+				"Invalid combination.", vm.Name)
+			log.Error(msg)
+			err := errors.New(msg)
+			return err
+		}
+	}
+
+	pvc := &v1.PersistentVolumeClaim{}
+	err := client.Get(ctx, types.NamespacedName{Name: pvcName, Namespace: namespace}, pvc)
+	if err != nil {
+		log.Errorf("failed to get PVC with name %s in namespace %s. Error: %+v", pvcName, namespace, err)
+		return err
+	}
+
+	for key := range pvc.Labels {
+		if strings.Contains(key, capvPvcLabelKey) {
+			msg := fmt.Sprintf("CnsFileAccessConfig is created by a devops user but refers to "+
+				"TKG PVC %s. Invalid combination.", pvcName)
+			log.Error(msg)
+			err := errors.New(msg)
+			return err
+		}
+	}
+
+	log.Infof("Successfully verified instance %s for VM/PVC combination", instanceName)
+	return nil
+}
+
 // setInstanceSuccess sets instance to success and records an event on the
 // CnsFileAccessConfig instance.
 func setInstanceSuccess(ctx context.Context, r *ReconcileCnsFileAccessConfig,
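validateVmAndPvc rejects devops-created instances whose VM or PVC carries a CAPV/TKG-applied label key. A small standalone illustration of the substring check it relies on; the sample label keys below are invented for the demo:

// label_key_check_sketch.go: the substring predicate validateVmAndPvc applies to VM and PVC label keys.
package main

import (
	"fmt"
	"strings"
)

// hasLabelKeyContaining reports whether any label key contains the given fragment.
func hasLabelKeyContaining(objLabels map[string]string, fragment string) bool {
	for key := range objLabels {
		if strings.Contains(key, fragment) {
			return true
		}
	}
	return false
}

func main() {
	// Sample keys; real CAPV/TKG label keys may differ.
	vmLabels := map[string]string{"capv.vmware.com/cluster.name": "tkg-cluster-1"}
	pvcLabels := map[string]string{"run.tanzu.vmware.com/TKGService": "true"}

	fmt.Println(hasLabelKeyContaining(vmLabels, "capv.vmware.com")) // true -> devops-created CR is rejected
	fmt.Println(hasLabelKeyContaining(pvcLabels, "TKGService"))     // true -> devops-created CR is rejected
}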
diff --git a/pkg/syncer/cnsoperator/controller/cnsfileaccessconfig/cnsfileaccessconfig_controller_test.go b/pkg/syncer/cnsoperator/controller/cnsfileaccessconfig/cnsfileaccessconfig_controller_test.go
new file mode 100644
index 0000000000..cad0636560
--- /dev/null
+++ b/pkg/syncer/cnsoperator/controller/cnsfileaccessconfig/cnsfileaccessconfig_controller_test.go
@@ -0,0 +1,93 @@
+package cnsfileaccessconfig
+
+import (
+	"context"
+	"testing"
+
+	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client/fake"
+
+	vmoperatorv1alpha4 "github.com/vmware-tanzu/vm-operator/api/v1alpha4"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+func TestValidateVmAndPvc(t *testing.T) {
+	scheme := runtime.NewScheme()
+	_ = v1.AddToScheme(scheme)
+	_ = vmoperatorv1alpha4.AddToScheme(scheme)
+
+	ctx := context.TODO()
+
+	tests := []struct {
+		name           string
+		instanceLabels map[string]string
+		vmLabels       map[string]string
+		expectError    bool
+		setupPVC       bool
+	}{
+		{
+			name:           "no instance labels",
+			instanceLabels: nil,
+			expectError:    false,
+		},
+		{
+			name: "labels but not devops user",
+			instanceLabels: map[string]string{
+				"random": "value",
+			},
+			expectError: false,
+		},
+		{
+			name: "devops user with capv label on VM",
+			instanceLabels: map[string]string{
+				devopsUserLabelKey: "true",
+			},
+			vmLabels: map[string]string{
+				capvVmLabelKey + "/cluster": "my-cluster",
+			},
+			expectError: true,
+		},
+		{
+			name: "valid input - no errors",
+			instanceLabels: map[string]string{
+				devopsUserLabelKey: "true",
+			},
+			setupPVC:    true,
+			expectError: false,
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			var objects []runtime.Object
+
+			vm := &vmoperatorv1alpha4.VirtualMachine{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:   "my-vm",
+					Labels: test.vmLabels,
+				},
+			}
+
+			if test.setupPVC {
+				pvc := &v1.PersistentVolumeClaim{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      "my-pvc",
+						Namespace: "default",
+					},
+				}
+				objects = append(objects, pvc)
+			}
+
+			client := fake.NewClientBuilder().WithScheme(scheme).WithRuntimeObjects(objects...).Build()
+
+			err := validateVmAndPvc(ctx, test.instanceLabels, "my-instance", "my-pvc", "default", client, vm)
+
+			if test.expectError && err == nil {
+				t.Errorf("expected error but got nil")
+			} else if !test.expectError && err != nil {
+				t.Errorf("unexpected error: %v", err)
+			}
+		})
+	}
+}
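The new table-driven test does not cover the branch where a devops-created instance references a PVC carrying a TKG-applied label. A hedged sketch of an extra test that could live alongside cnsfileaccessconfig_controller_test.go in the same package; the tanzu-prefixed label key is invented for the example:

// Hypothetical companion test; it assumes it compiles in the same package as the new test file,
// so validateVmAndPvc, devopsUserLabelKey, and capvPvcLabelKey are in scope.
package cnsfileaccessconfig

import (
	"context"
	"testing"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"

	vmoperatorv1alpha4 "github.com/vmware-tanzu/vm-operator/api/v1alpha4"
)

func TestValidateVmAndPvcRejectsTKGPvc(t *testing.T) {
	scheme := runtime.NewScheme()
	_ = v1.AddToScheme(scheme)
	_ = vmoperatorv1alpha4.AddToScheme(scheme)

	// PVC with a label key containing "TKGService"; the tanzu-style prefix is invented for the test.
	pvc := &v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "my-pvc",
			Namespace: "default",
			Labels:    map[string]string{"run.tanzu.vmware.com/" + capvPvcLabelKey: "true"},
		},
	}
	vm := &vmoperatorv1alpha4.VirtualMachine{ObjectMeta: metav1.ObjectMeta{Name: "my-vm"}}
	fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(pvc).Build()

	err := validateVmAndPvc(context.TODO(), map[string]string{devopsUserLabelKey: "true"},
		"my-instance", "my-pvc", "default", fakeClient, vm)
	if err == nil {
		t.Errorf("expected an error for a devops-created instance with a TKG-labeled PVC, got nil")
	}
}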