|
| 1 | +//go:build e2e |
| 2 | +// +build e2e |
| 3 | + |
| 4 | +/* |
| 5 | +Copyright 2022 The Kubernetes Authors. |
| 6 | +
|
| 7 | +Licensed under the Apache License, Version 2.0 (the "License"); |
| 8 | +you may not use this file except in compliance with the License. |
| 9 | +You may obtain a copy of the License at |
| 10 | +
|
| 11 | + http://www.apache.org/licenses/LICENSE-2.0 |
| 12 | +
|
| 13 | +Unless required by applicable law or agreed to in writing, software |
| 14 | +distributed under the License is distributed on an "AS IS" BASIS, |
| 15 | +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 16 | +See the License for the specific language governing permissions and |
| 17 | +limitations under the License. |
| 18 | +*/ |
| 19 | + |
| 20 | +package e2e |
| 21 | + |
| 22 | +import ( |
| 23 | + "context" |
| 24 | + "fmt" |
| 25 | + "os" |
| 26 | + "path/filepath" |
| 27 | + |
| 28 | + . "github.com/onsi/ginkgo" |
| 29 | + . "github.com/onsi/gomega" |
| 30 | + |
| 31 | + corev1 "k8s.io/api/core/v1" |
| 32 | + "k8s.io/utils/pointer" |
| 33 | + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" |
| 34 | + |
| 35 | + capi_e2e "sigs.k8s.io/cluster-api/test/e2e" |
| 36 | + "sigs.k8s.io/cluster-api/test/framework" |
| 37 | + "sigs.k8s.io/cluster-api/test/framework/clusterctl" |
| 38 | + "sigs.k8s.io/cluster-api/util" |
| 39 | +) |
| 40 | + |
| 41 | +var _ = Describe("Workload cluster creation", func() { |
| 42 | + var ( |
| 43 | + ctx = context.TODO() |
| 44 | + specName = "create-workload-cluster" |
| 45 | + namespace *corev1.Namespace |
| 46 | + cancelWatches context.CancelFunc |
| 47 | + result *clusterctl.ApplyClusterTemplateAndWaitResult |
| 48 | + clusterName string |
| 49 | + clusterctlLogFolder string |
| 50 | + cniPath string |
| 51 | + ) |
| 52 | + |
| 53 | + BeforeEach(func() { |
| 54 | + Expect(e2eConfig).ToNot(BeNil(), "Invalid argument. e2eConfig can't be nil when calling %s spec", specName) |
| 55 | + Expect(clusterctlConfigPath).To(BeAnExistingFile(), "Invalid argument. clusterctlConfigPath must be an existing file when calling %s spec", specName) |
| 56 | + Expect(bootstrapClusterProxy).ToNot(BeNil(), "Invalid argument. bootstrapClusterProxy can't be nil when calling %s spec", specName) |
| 57 | + Expect(os.MkdirAll(artifactFolder, 0755)).To(Succeed(), "Invalid argument. artifactFolder can't be created for %s spec", specName) |
| 58 | + |
| 59 | + Expect(e2eConfig.Variables).To(HaveKey(KubernetesVersion)) |
| 60 | + |
| 61 | + clusterName = fmt.Sprintf("capi-ibmcloud-e2e-%s", util.RandomString(6)) |
| 62 | + |
| 63 | + // Setup a Namespace where to host objects for this spec and create a watcher for the namespace events. |
| 64 | + namespace, cancelWatches = setupSpecNamespace(ctx, specName, bootstrapClusterProxy, artifactFolder) |
| 65 | + |
| 66 | + result = new(clusterctl.ApplyClusterTemplateAndWaitResult) |
| 67 | + |
| 68 | + // We need to override clusterctl apply log folder to avoid getting our credentials exposed. |
| 69 | + clusterctlLogFolder = filepath.Join(os.TempDir(), "clusters", bootstrapClusterProxy.GetName()) |
| 70 | + |
| 71 | + // Path to the CNI file is defined in the config |
| 72 | + Expect(e2eConfig.Variables).To(HaveKey(capi_e2e.CNIPath), "Missing %s variable in the config", capi_e2e.CNIPath) |
| 73 | + cniPath = e2eConfig.GetVariable(capi_e2e.CNIPath) |
| 74 | + }) |
| 75 | + |
| 76 | + AfterEach(func() { |
| 77 | + cleanInput := cleanupInput{ |
| 78 | + SpecName: specName, |
| 79 | + Cluster: result.Cluster, |
| 80 | + ClusterProxy: bootstrapClusterProxy, |
| 81 | + Namespace: namespace, |
| 82 | + CancelWatches: cancelWatches, |
| 83 | + IntervalsGetter: e2eConfig.GetIntervals, |
| 84 | + SkipCleanup: skipCleanup, |
| 85 | + ArtifactFolder: artifactFolder, |
| 86 | + } |
| 87 | + |
| 88 | + dumpSpecResourcesAndCleanup(ctx, cleanInput) |
| 89 | + }) |
| 90 | + |
| 91 | + Context("Creating a single control-plane cluster", func() { |
| 92 | + It("Should create a cluster with 1 worker node and can be scaled", func() { |
| 93 | + By("Initializing with 1 worker node") |
| 94 | + clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{ |
| 95 | + ClusterProxy: bootstrapClusterProxy, |
| 96 | + ConfigCluster: clusterctl.ConfigClusterInput{ |
| 97 | + LogFolder: clusterctlLogFolder, |
| 98 | + ClusterctlConfigPath: clusterctlConfigPath, |
| 99 | + KubeconfigPath: bootstrapClusterProxy.GetKubeconfigPath(), |
| 100 | + InfrastructureProvider: clusterctl.DefaultInfrastructureProvider, |
| 101 | + Flavor: flavor, |
| 102 | + Namespace: namespace.Name, |
| 103 | + ClusterName: clusterName, |
| 104 | + KubernetesVersion: e2eConfig.GetVariable(KubernetesVersion), |
| 105 | + ControlPlaneMachineCount: pointer.Int64Ptr(1), |
| 106 | + WorkerMachineCount: pointer.Int64Ptr(1), |
| 107 | + }, |
| 108 | + CNIManifestPath: cniPath, |
| 109 | + WaitForClusterIntervals: e2eConfig.GetIntervals(specName, "wait-cluster"), |
| 110 | + WaitForControlPlaneIntervals: e2eConfig.GetIntervals(specName, "wait-control-plane"), |
| 111 | + WaitForMachineDeployments: e2eConfig.GetIntervals(specName, "wait-worker-nodes"), |
| 112 | + }, result) |
| 113 | + |
| 114 | + By("Scaling worker node to 3") |
| 115 | + clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{ |
| 116 | + ClusterProxy: bootstrapClusterProxy, |
| 117 | + ConfigCluster: clusterctl.ConfigClusterInput{ |
| 118 | + LogFolder: clusterctlLogFolder, |
| 119 | + ClusterctlConfigPath: clusterctlConfigPath, |
| 120 | + KubeconfigPath: bootstrapClusterProxy.GetKubeconfigPath(), |
| 121 | + InfrastructureProvider: clusterctl.DefaultInfrastructureProvider, |
| 122 | + Flavor: flavor, |
| 123 | + Namespace: namespace.Name, |
| 124 | + ClusterName: clusterName, |
| 125 | + KubernetesVersion: e2eConfig.GetVariable(KubernetesVersion), |
| 126 | + ControlPlaneMachineCount: pointer.Int64Ptr(1), |
| 127 | + WorkerMachineCount: pointer.Int64Ptr(3), |
| 128 | + }, |
| 129 | + CNIManifestPath: cniPath, |
| 130 | + WaitForClusterIntervals: e2eConfig.GetIntervals(specName, "wait-cluster"), |
| 131 | + WaitForControlPlaneIntervals: e2eConfig.GetIntervals(specName, "wait-control-plane"), |
| 132 | + WaitForMachineDeployments: e2eConfig.GetIntervals(specName, "wait-worker-nodes"), |
| 133 | + }, result) |
| 134 | + }) |
| 135 | + }) |
| 136 | + |
| 137 | + Context("Creating a highly available control-plane cluster", func() { |
| 138 | + It("Should create a cluster with 3 control-plane nodes and 1 worker node", func() { |
| 139 | + By("Creating a high available cluster") |
| 140 | + clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{ |
| 141 | + ClusterProxy: bootstrapClusterProxy, |
| 142 | + ConfigCluster: clusterctl.ConfigClusterInput{ |
| 143 | + LogFolder: clusterctlLogFolder, |
| 144 | + ClusterctlConfigPath: clusterctlConfigPath, |
| 145 | + KubeconfigPath: bootstrapClusterProxy.GetKubeconfigPath(), |
| 146 | + InfrastructureProvider: clusterctl.DefaultInfrastructureProvider, |
| 147 | + Flavor: flavor, |
| 148 | + Namespace: namespace.Name, |
| 149 | + ClusterName: clusterName, |
| 150 | + KubernetesVersion: e2eConfig.GetVariable(KubernetesVersion), |
| 151 | + ControlPlaneMachineCount: pointer.Int64Ptr(3), |
| 152 | + WorkerMachineCount: pointer.Int64Ptr(1), |
| 153 | + }, |
| 154 | + CNIManifestPath: cniPath, |
| 155 | + WaitForClusterIntervals: e2eConfig.GetIntervals(specName, "wait-cluster"), |
| 156 | + WaitForControlPlaneIntervals: e2eConfig.GetIntervals(specName, "wait-control-plane"), |
| 157 | + WaitForMachineDeployments: e2eConfig.GetIntervals(specName, "wait-worker-nodes"), |
| 158 | + }, result) |
| 159 | + }) |
| 160 | + }) |
| 161 | +}) |
| 162 | + |
| 163 | +func Byf(format string, a ...interface{}) { |
| 164 | + By(fmt.Sprintf(format, a...)) |
| 165 | +} |
| 166 | + |
// cleanupInput bundles everything dumpSpecResourcesAndCleanup needs to dump
// spec resources and tear down the workload cluster after a spec finishes.
type cleanupInput struct {
	// SpecName is the spec identifier used in log messages and interval lookups.
	SpecName string
	// ClusterProxy talks to the bootstrap/management cluster.
	ClusterProxy framework.ClusterProxy
	// ArtifactFolder is the root directory where logs and resource dumps are written.
	ArtifactFolder string
	// Namespace is the spec's namespace on the management cluster; deleted during cleanup.
	Namespace *corev1.Namespace
	// CancelWatches stops the namespace event watcher started in setupSpecNamespace.
	CancelWatches context.CancelFunc
	// Cluster is the workload cluster to dump logs from; may be nil if creation never ran.
	Cluster *clusterv1.Cluster
	// IntervalsGetter resolves wait intervals (e.g. "wait-delete-cluster") from the e2e config.
	IntervalsGetter func(spec, key string) []interface{}
	// SkipCleanup, when true, leaves the cluster and namespace in place after dumping.
	SkipCleanup bool
	// AdditionalCleanup, if non-nil, runs after the standard cleanup steps.
	AdditionalCleanup func()
}
| 178 | + |
| 179 | +func setupSpecNamespace(ctx context.Context, specName string, clusterProxy framework.ClusterProxy, artifactFolder string) (*corev1.Namespace, context.CancelFunc) { |
| 180 | + Byf("Creating a namespace for hosting the %q test spec", specName) |
| 181 | + namespace, cancelWatches := framework.CreateNamespaceAndWatchEvents(ctx, framework.CreateNamespaceAndWatchEventsInput{ |
| 182 | + Creator: clusterProxy.GetClient(), |
| 183 | + ClientSet: clusterProxy.GetClientSet(), |
| 184 | + Name: fmt.Sprintf("%s-%s", specName, util.RandomString(6)), |
| 185 | + LogFolder: filepath.Join(artifactFolder, "clusters", clusterProxy.GetName()), |
| 186 | + }) |
| 187 | + |
| 188 | + return namespace, cancelWatches |
| 189 | +} |
| 190 | + |
| 191 | +func dumpSpecResourcesAndCleanup(ctx context.Context, input cleanupInput) { |
| 192 | + defer func() { |
| 193 | + input.CancelWatches() |
| 194 | + }() |
| 195 | + |
| 196 | + if input.Cluster == nil { |
| 197 | + By("Unable to dump workload cluster logs as the cluster is nil") |
| 198 | + } else { |
| 199 | + Byf("Dumping logs from the %q workload cluster", input.Cluster.Name) |
| 200 | + input.ClusterProxy.CollectWorkloadClusterLogs(ctx, input.Cluster.Namespace, input.Cluster.Name, filepath.Join(input.ArtifactFolder, "clusters", input.Cluster.Name)) |
| 201 | + } |
| 202 | + |
| 203 | + Byf("Dumping all the Cluster API resources in the %q namespace", input.Namespace.Name) |
| 204 | + // Dump all Cluster API related resources to artifacts before deleting them. |
| 205 | + framework.DumpAllResources(ctx, framework.DumpAllResourcesInput{ |
| 206 | + Lister: input.ClusterProxy.GetClient(), |
| 207 | + Namespace: input.Namespace.Name, |
| 208 | + LogPath: filepath.Join(input.ArtifactFolder, "clusters", input.ClusterProxy.GetName(), "resources"), |
| 209 | + }) |
| 210 | + |
| 211 | + if input.SkipCleanup { |
| 212 | + return |
| 213 | + } |
| 214 | + |
| 215 | + Byf("Deleting all clusters in the %s namespace", input.Namespace.Name) |
| 216 | + framework.DeleteAllClustersAndWait(ctx, framework.DeleteAllClustersAndWaitInput{ |
| 217 | + Client: input.ClusterProxy.GetClient(), |
| 218 | + Namespace: input.Namespace.Name, |
| 219 | + }, input.IntervalsGetter(input.SpecName, "wait-delete-cluster")...) |
| 220 | + |
| 221 | + Byf("Deleting namespace used for hosting the %q test spec", input.SpecName) |
| 222 | + framework.DeleteNamespace(ctx, framework.DeleteNamespaceInput{ |
| 223 | + Deleter: input.ClusterProxy.GetClient(), |
| 224 | + Name: input.Namespace.Name, |
| 225 | + }) |
| 226 | + |
| 227 | + if input.AdditionalCleanup != nil { |
| 228 | + Byf("Running additional cleanup for the %q test spec", input.SpecName) |
| 229 | + input.AdditionalCleanup() |
| 230 | + } |
| 231 | +} |
0 commit comments