/*
Copyright 2025 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package e2e

import (
	"context"
	"fmt"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"

	v1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubectl/pkg/drain"
	"k8s.io/kubernetes/test/e2e/framework"
	fnodes "k8s.io/kubernetes/test/e2e/framework/node"
	fpod "k8s.io/kubernetes/test/e2e/framework/pod"
	admissionapi "k8s.io/pod-security-admission/api"
)

var _ bool = ginkgo.Describe("[podvm-domain-isolation-vsan-max] PodVM-WLDI-Vsan-Max", func() {

	f := framework.NewDefaultFramework("domain-isolation")
	f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
	f.SkipNamespaceCreation = true // tests will create their own namespaces
	var (
		client                  clientset.Interface
		namespace               string
		storageProfileId        string
		vcRestSessionId         string
		allowedTopologies       []v1.TopologySelectorLabelRequirement
		storagePolicyName       string
		topkeyStartIndex        int
		topologyAffinityDetails map[string][]string
		topologyCategories      []string
		labelsMap               map[string]string
		labels_ns               map[string]string
		err                     error
		zone1                   string
		zone2                   string
		statuscode              int
		nodeList                *v1.NodeList
		uncordon                bool         // set once nodes are cordoned so the deferred/AfterEach cleanup restores them
		filteredNodes           *v1.NodeList // Az2 nodes cordoned by the test; shared with AfterEach for cleanup
		dh                      drain.Helper
	)

	ginkgo.BeforeEach(func() {
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()

		// making vc connection
		client = f.ClientSet
		bootstrap()

		// reading vc session id
		if vcRestSessionId == "" {
			vcRestSessionId = createVcSession4RestApis(ctx)
		}

		nodeList, err = fnodes.GetReadySchedulableNodes(ctx, f.ClientSet)
		framework.ExpectNoError(err, "Unable to find ready and schedulable Node")
		if len(nodeList.Items) == 0 {
			framework.Failf("Unable to find ready and schedulable Node")
		}

		// reading topology map set for management domain and workload domain
		topologyMap := GetAndExpectStringEnvVar(envTopologyMap)
		allowedTopologies = createAllowedTopolgies(topologyMap)
		topologyAffinityDetails, topologyCategories = createTopologyMapLevel5(topologyMap)

		// required for pod creation
		labels_ns = map[string]string{}
		labels_ns[admissionapi.EnforceLevelLabel] = string(admissionapi.LevelPrivileged)
		labels_ns["e2e-framework"] = f.BaseName

		// setting map values
		labelsMap = make(map[string]string)
		labelsMap["app"] = "test"

		// zones used in the test
		zone1 = topologyAffinityDetails[topologyCategories[0]][0]
		zone2 = topologyAffinityDetails[topologyCategories[0]][1]

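		// The drain helper below is reused for the cordon/drain/uncordon calls in this spec:
		// Force lets the drain evict pods that are not managed by a controller, and
		// IgnoreAllDaemonSets skips DaemonSet-managed pods, which would otherwise block the drain.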
		// Use a background context for the helper: it is reused by the It block and AfterEach,
		// which run after this BeforeEach's ctx has already been cancelled.
		dh = drain.Helper{
			Ctx:                 context.Background(),
			Client:              client,
			Force:               true,
			IgnoreAllDaemonSets: true,
			Out:                 ginkgo.GinkgoWriter,
			ErrOut:              ginkgo.GinkgoWriter,
		}
	})

	ginkgo.AfterEach(func() {
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()

		dumpSvcNsEventsOnTestFailure(client, namespace)

		framework.Logf("Collecting supervisor PVC events before performing PV/PVC cleanup")
		eventList, err := client.CoreV1().Events(namespace).List(ctx, metav1.ListOptions{})
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
		for _, item := range eventList.Items {
			framework.Logf("%q", item.Message)
		}

		if uncordon {
			ginkgo.By("Uncordoning of nodes")
			for i := range filteredNodes.Items {
				node := &filteredNodes.Items[i]
				ginkgo.By("Uncordoning node: " + node.Name)
				err := drain.RunCordonOrUncordon(&dh, node, false)
				gomega.Expect(err).NotTo(gomega.HaveOccurred())
			}
		}
	})

	/*
		vSAN Max vSAN Stretch zonal mount of datastore on Az2 with cordon/uncordon of nodes

		Steps:
		1. Create a WCP namespace and tag it to the Az1 and Az2 zones.
		2. Read a zonal storage policy which is compatible only with the Az2 zone, using Immediate binding mode.
		3. Create a PVC using the zonal policy of Az2.
		4. Wait for the PVC and PV to reach Bound state.
		5. Verify the PVC has the csi.vsphere.volume-accessible-topology annotation with Az2.
		6. Verify the PV has a node affinity rule for Az2.
		7. Create a Pod using the PVC created above.
		8. Wait for the Pod to reach Running state.
		9. Verify the pod node annotation.
		10. Cordon all the nodes in Az2.
		11. Delete and re-create the above pod.
		12. Scheduling should fail for the re-created pod as there are no nodes available in Az2.
		13. Uncordon the nodes before cleanup.
	*/

	ginkgo.It("vSAN Max vSAN Stretch zonal mount of datastore on Az2 "+
		"with cordon/uncordon of nodes", ginkgo.Label(p0, wldi, vc90), func() {
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()

		// reading zonal storage policy of zone-2 workload domain
		storagePolicyName = GetAndExpectStringEnvVar(envZonal2StoragePolicyName)
		storageProfileId = e2eVSphere.GetSpbmPolicyID(storagePolicyName)

		/*
			EX - zone -> zone-1, zone-2, zone-3, zone-4
			topValStartIndex=0 and topValEndIndex=2 fetch the first two zone values (Az1 and Az2)
			from the topology map string
		*/
		topValStartIndex := 0
		topValEndIndex := 2

		ginkgo.By("Create a WCP namespace tagged to Az1 and Az2, with only the Az2 storage policy attached")
		allowedTopologies = setSpecificAllowedTopology(allowedTopologies, topkeyStartIndex, topValStartIndex,
			topValEndIndex)
		namespace, statuscode, err = createtWcpNsWithZonesAndPolicies(vcRestSessionId,
			[]string{storageProfileId}, getSvcId(vcRestSessionId),
			[]string{zone1, zone2}, "", "")
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
		gomega.Expect(statuscode).To(gomega.Equal(status_code_success))
		defer func() {
			delTestWcpNs(vcRestSessionId, namespace)
			gomega.Expect(waitForNamespaceToGetDeleted(ctx, client, namespace, poll, pollTimeout)).To(gomega.Succeed())
		}()
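		// Note: deleting the WCP namespace in the deferred cleanup above is expected to also remove
		// the PVC and pod created in it, since namespace deletion cascades to the objects it contains,
		// so no separate PVC/pod cleanup is registered here.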

		ginkgo.By("Read the Az2 storage policy tagged to the WCP namespace")
		storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{})
		if !apierrors.IsNotFound(err) {
			gomega.Expect(err).NotTo(gomega.HaveOccurred())
		}

		ginkgo.By("Create PVC")
		pvclaim, persistentVolumes, err := createPVCAndQueryVolumeInCNS(ctx, client, namespace, labelsMap, "",
			diskSize, storageclass, true)
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
		volHandle := persistentVolumes[0].Spec.CSI.VolumeHandle

		ginkgo.By("Create a Pod and attach it to the PVC created above")
		pod, err := createPod(ctx, client, namespace, nil, []*v1.PersistentVolumeClaim{pvclaim}, false,
			execRWXCommandPod1)
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
		annotations := pod.Annotations
		vmUUID, exists := annotations[vmUUIDLabel]
		gomega.Expect(exists).To(gomega.BeTrue(), fmt.Sprintf("Pod doesn't have %s annotation", vmUUIDLabel))
		isDiskAttached, err := e2eVSphere.isVolumeAttachedToVM(client, volHandle, vmUUID)
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
		gomega.Expect(isDiskAttached).To(gomega.BeTrue(), fmt.Sprintf("Volume is not attached to the node, %s", vmUUID))

		ginkgo.By("Verify svc pv affinity, pvc annotation and pod node affinity")
		err = verifyPvcAnnotationPvAffinityPodAnnotationInSvc(ctx, client, nil, pod, nil, namespace,
			allowedTopologies)
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
		ginkgo.By("Fetch the node details of Az2")
		/*
			EX - zone -> zone-1, zone-2, zone-3, zone-4
			topValStartIndex=1 and topValEndIndex=2 fetch only the value at index 1 of the topology
			map string, i.e. the Az2 zone
		*/
		allowedTopologies = setSpecificAllowedTopology(allowedTopologies, topkeyStartIndex, 1, 2)
		allowedTopologiesMap := createAllowedTopologiesMap(allowedTopologies)
		// assign (not ':=') so the fetched nodes stay visible to AfterEach for uncordon cleanup
		filteredNodes = fetchAllNodesOfSpecificZone(nodeList, allowedTopologiesMap)

		// Nodes are about to be cordoned; flag them for uncordon in cleanup in case the test fails below.
		uncordon = true
		for _, node := range filteredNodes.Items {
			ginkgo.By("Cordoning of node: " + node.Name)
			err = drain.RunCordonOrUncordon(&dh, &node, true)
			gomega.Expect(err).NotTo(gomega.HaveOccurred())
			ginkgo.By("Draining of node: " + node.Name)
			err = drain.RunNodeDrain(&dh, node.Name)
			gomega.Expect(err).NotTo(gomega.HaveOccurred())
		}
		defer func() {
			if uncordon {
				ginkgo.By("Uncordoning of nodes")
				for i := range filteredNodes.Items {
					node := &filteredNodes.Items[i]
					ginkgo.By("Uncordoning node: " + node.Name)
					err := drain.RunCordonOrUncordon(&dh, node, false)
					gomega.Expect(err).NotTo(gomega.HaveOccurred())
				}
				uncordon = false
			}
		}()

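		// With every Az2 node cordoned and drained, and the PV's node affinity restricted to Az2,
		// the pod recreated below has no schedulable node, so scheduling is expected to fail
		// (steps 10-12 of the test description above).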
		ginkgo.By(fmt.Sprintf("Deleting the pod %s in namespace %s", pod.Name, namespace))
		err = fpod.DeletePodWithWait(ctx, client, pod)
		gomega.Expect(err).NotTo(gomega.HaveOccurred())

		ginkgo.By("Recreate the Pod and attach it to the same PVC, " +
			"expecting scheduling to fail as no nodes are available in Az2")
		_, err = createPod(ctx, client, namespace, nil, []*v1.PersistentVolumeClaim{pvclaim}, false,
			execRWXCommandPod1)
		gomega.Expect(err).To(gomega.HaveOccurred())

		ginkgo.By("Uncordoning of nodes")
		for i := range filteredNodes.Items {
			node := &filteredNodes.Items[i]
			ginkgo.By("Uncordoning node: " + node.Name)
			err := drain.RunCordonOrUncordon(&dh, node, false)
			gomega.Expect(err).NotTo(gomega.HaveOccurred())
		}
		// nodes are already uncordoned above; the deferred block and AfterEach can skip cleanup
		uncordon = false
	})
})