From 016a67ea601feaffda265f9eea726a58f99e33c7 Mon Sep 17 00:00:00 2001
From: Bo-Cheng Chu
Date: Thu, 25 Sep 2025 13:54:35 -0700
Subject: [PATCH] add an e2e test for internal load balancer only

---
 Makefile                                      |   2 +-
 config/default/manager_image_patch.yaml       |   2 +-
 scripts/ci-e2e.sh                             |  27 +++
 test/e2e/config/gcp-ci.yaml                   |   2 +-
 ...late-ci-with-external-and-internal-lb.yaml | 173 ++++++++++++++++++
 .../cluster-template-ci-with-internal-lb.yaml |   5 +-
 test/e2e/e2e_test.go                          |  96 ++++++---
 test/e2e/suite_test.go                        |  25 ++-
 8 files changed, 305 insertions(+), 27 deletions(-)
 create mode 100644 test/e2e/data/infrastructure-gcp/cluster-template-ci-with-external-and-internal-lb.yaml

diff --git a/Makefile b/Makefile
index c66cd3063..d489da875 100644
--- a/Makefile
+++ b/Makefile
@@ -176,7 +176,7 @@ SKIP_CLEANUP ?= false
 SKIP_CREATE_MGMT_CLUSTER ?= false
 
 .PHONY: test-e2e-run
-test-e2e-run: $(ENVSUBST) $(KUBECTL) $(GINKGO) e2e-image ## Run the end-to-end tests
+test-e2e-run: $(ENVSUBST) $(KUBECTL) $(GINKGO) ## Run the end-to-end tests
	$(ENVSUBST) < $(E2E_CONF_FILE) > $(E2E_CONF_FILE_ENVSUBST) && \
	time $(GINKGO) -v --trace -poll-progress-after=$(GINKGO_POLL_PROGRESS_AFTER) -poll-progress-interval=$(GINKGO_POLL_PROGRESS_INTERVAL) \
	--tags=e2e --focus="$(GINKGO_FOCUS)" -skip="$(GINKGO_SKIP)" --nodes=$(GINKGO_NODES) --no-color=$(GINKGO_NOCOLOR) \

diff --git a/config/default/manager_image_patch.yaml b/config/default/manager_image_patch.yaml
index 0ff7fe785..2e518d436 100644
--- a/config/default/manager_image_patch.yaml
+++ b/config/default/manager_image_patch.yaml
@@ -8,5 +8,5 @@ spec:
     spec:
       containers:
       # Change the value of image field below to your controller image URL
-      - image: gcr.io/k8s-staging-cluster-api-gcp/cluster-api-gcp-controller:e2e
+      - image: ${CONTROLLER_IMAGE}
         name: manager

diff --git a/scripts/ci-e2e.sh b/scripts/ci-e2e.sh
index f871d0399..3ae0bf884 100755
--- a/scripts/ci-e2e.sh
+++ b/scripts/ci-e2e.sh
@@ -101,6 +101,12 @@ EOF
   export IMAGE_ID="projects/${GCP_PROJECT}/global/images/${image_id}"
 }
 
+init_controller_image() {
+  export CONTROLLER_IMAGE="gcr.io/${GCP_PROJECT}/cluster-api-gcp-controller:${TEST_NAME}"
+  echo "Tagging and pushing controller image to ${CONTROLLER_IMAGE}"
+  docker tag gcr.io/k8s-staging-cluster-api-gcp/cluster-api-gcp-controller:e2e "${CONTROLLER_IMAGE}"
+  docker push "${CONTROLLER_IMAGE}"
+}
 
 # initialize a router and cloud NAT
 init_networks() {
@@ -127,6 +133,14 @@ init_networks() {
     --nat-all-subnet-ip-ranges --auto-allocate-nat-external-ips
 }
 
+# create a GKE cluster to be used as a bootstrap cluster
+create_gke_bootstrap_cluster() {
+  gcloud container clusters create "${TEST_NAME}-gke-bootstrap" --project "$GCP_PROJECT" \
+    --region "$GCP_REGION" --num-nodes 1 --machine-type e2-medium --release-channel regular \
+    --network "${GCP_NETWORK_NAME}" --quiet
+  export GKE_BOOTSTRAP_KUBECONFIG="${ARTIFACTS}/gke_bootstrap_kubeconfig"
+  KUBECONFIG="${GKE_BOOTSTRAP_KUBECONFIG}" gcloud container clusters get-credentials "${TEST_NAME}-gke-bootstrap" --region "${GCP_REGION}" --project "${GCP_PROJECT}"
+}
 
 cleanup() {
   # Force a cleanup of cluster api created resources using gcloud commands
@@ -152,6 +166,9 @@ cleanup() {
     | awk '{print "gcloud compute firewall-rules delete --project '"$GCP_PROJECT"' --quiet " $1 "\n"}' \
     | bash) || true
 
+  gcloud container clusters delete "${TEST_NAME}-gke-bootstrap" --project "$GCP_PROJECT" \
+    --region "$GCP_REGION" --quiet || true
+
   # cleanup the networks
   gcloud compute routers nats delete "${TEST_NAME}-mynat" --project="${GCP_PROJECT}" \
     --router-region="${GCP_REGION}" --router="${TEST_NAME}-myrouter" --quiet || true
@@ -275,6 +292,16 @@ EOF
     init_networks
   fi
 
+  # Initialize the GKE bootstrap cluster
+  if [[ -n "${SKIP_INIT_GKE_BOOTSTRAP:-}" ]]; then
+    echo "Skipping GKE bootstrap cluster initialization..."
+  else
+    create_gke_bootstrap_cluster
+  fi
+
+  make e2e-image
+  init_controller_image
+
   make test-e2e
   test_status="${?}"
   echo TESTSTATUS

diff --git a/test/e2e/config/gcp-ci.yaml b/test/e2e/config/gcp-ci.yaml
index 2789a9ab7..cc71e9527 100644
--- a/test/e2e/config/gcp-ci.yaml
+++ b/test/e2e/config/gcp-ci.yaml
@@ -8,7 +8,7 @@ managementClusterName: capg-e2e
 
 images:
   # Use local dev images built source tree;
-  - name: gcr.io/k8s-staging-cluster-api-gcp/cluster-api-gcp-controller:e2e
+  - name: ${CONTROLLER_IMAGE}
     loadBehavior: mustLoad
 
 providers:

diff --git a/test/e2e/data/infrastructure-gcp/cluster-template-ci-with-external-and-internal-lb.yaml b/test/e2e/data/infrastructure-gcp/cluster-template-ci-with-external-and-internal-lb.yaml
new file mode 100644
index 000000000..95048c8bc
--- /dev/null
+++ b/test/e2e/data/infrastructure-gcp/cluster-template-ci-with-external-and-internal-lb.yaml
@@ -0,0 +1,173 @@
+---
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: Cluster
+metadata:
+  name: "${CLUSTER_NAME}"
+  labels:
+    cni: "${CLUSTER_NAME}-crs-cni"
+    ccm: "${CLUSTER_NAME}-crs-ccm"
+spec:
+  clusterNetwork:
+    pods:
+      cidrBlocks: ["192.168.0.0/16"]
+  infrastructureRef:
+    apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+    kind: GCPCluster
+    name: "${CLUSTER_NAME}"
+  controlPlaneRef:
+    kind: KubeadmControlPlane
+    apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+    name: "${CLUSTER_NAME}-control-plane"
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+kind: GCPCluster
+metadata:
+  name: "${CLUSTER_NAME}"
+spec:
+  project: "${GCP_PROJECT}"
+  region: "${GCP_REGION}"
+  network:
+    name: "${GCP_NETWORK_NAME}"
+    subnets:
+      - name: control-plane-subnet
+        cidrBlock: "10.0.0.0/17"
+        purpose: PRIVATE
+        region: us-east4
+  loadBalancer:
+    loadBalancerType: InternalExternal
+---
+apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+kind: KubeadmControlPlane
+metadata:
+  name: "${CLUSTER_NAME}-control-plane"
+spec:
+  replicas: ${CONTROL_PLANE_MACHINE_COUNT}
+  machineTemplate:
+    infrastructureRef:
+      kind: GCPMachineTemplate
+      apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+      name: "${CLUSTER_NAME}-control-plane"
+  kubeadmConfigSpec:
+    initConfiguration:
+      nodeRegistration:
+        name: '{{ ds.meta_data.local_hostname.split(".")[0] }}'
+        kubeletExtraArgs:
+          cloud-provider: external
+    clusterConfiguration:
+      apiServer:
+        timeoutForControlPlane: 20m
+      controllerManager:
+        extraArgs:
+          cloud-provider: external
+          allocate-node-cidrs: "false"
+      kubernetesVersion: "${KUBERNETES_VERSION}"
+    files:
+      - content: |
+          [Global]
+
+          project-id = "${GCP_PROJECT}"
+          network-name = "${GCP_NETWORK_NAME}"
+          multizone = true
+        owner: root:root
+        path: /etc/kubernetes/cloud.config
+        permissions: "0744"
+    joinConfiguration:
+      nodeRegistration:
+        name: '{{ ds.meta_data.local_hostname.split(".")[0] }}'
+        kubeletExtraArgs:
+          cloud-provider: external
+  version: "${KUBERNETES_VERSION}"
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+kind: GCPMachineTemplate
+metadata:
+  name: "${CLUSTER_NAME}-control-plane"
+spec:
+  template:
+    spec:
+      instanceType: "${GCP_CONTROL_PLANE_MACHINE_TYPE}"
+      image: "${IMAGE_ID}"
+---
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: MachineDeployment
+metadata:
+  name: "${CLUSTER_NAME}-md-0"
+spec:
+  clusterName: "${CLUSTER_NAME}"
+  replicas: ${WORKER_MACHINE_COUNT}
+  selector:
+    matchLabels:
+  template:
+    spec:
+      clusterName: "${CLUSTER_NAME}"
+      version: "${KUBERNETES_VERSION}"
+      bootstrap:
+        configRef:
+          name: "${CLUSTER_NAME}-md-0"
+          apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+          kind: KubeadmConfigTemplate
+      infrastructureRef:
+        name: "${CLUSTER_NAME}-md-0"
+        apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+        kind: GCPMachineTemplate
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+kind: GCPMachineTemplate
+metadata:
+  name: "${CLUSTER_NAME}-md-0"
+spec:
+  template:
+    spec:
+      instanceType: "${GCP_NODE_MACHINE_TYPE}"
+      image: "${IMAGE_ID}"
+---
+apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+kind: KubeadmConfigTemplate
+metadata:
+  name: "${CLUSTER_NAME}-md-0"
+spec:
+  template:
+    spec:
+      joinConfiguration:
+        nodeRegistration:
+          name: '{{ ds.meta_data.local_hostname.split(".")[0] }}'
+          kubeletExtraArgs:
+            cloud-provider: external
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: "${CLUSTER_NAME}-crs-cni"
+data: ${CNI_RESOURCES}
+---
+apiVersion: addons.cluster.x-k8s.io/v1beta1
+kind: ClusterResourceSet
+metadata:
+  name: "${CLUSTER_NAME}-crs-cni"
+spec:
+  strategy: ApplyOnce
+  clusterSelector:
+    matchLabels:
+      cni: "${CLUSTER_NAME}-crs-cni"
+  resources:
+    - name: "${CLUSTER_NAME}-crs-cni"
+      kind: ConfigMap
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: "${CLUSTER_NAME}-crs-ccm"
+data: ${CCM_RESOURCES}
+---
+apiVersion: addons.cluster.x-k8s.io/v1beta1
+kind: ClusterResourceSet
+metadata:
+  name: "${CLUSTER_NAME}-crs-ccm"
+spec:
+  strategy: ApplyOnce
+  clusterSelector:
+    matchLabels:
+      ccm: "${CLUSTER_NAME}-crs-ccm"
+  resources:
+    - name: "${CLUSTER_NAME}-crs-ccm"
+      kind: ConfigMap

diff --git a/test/e2e/data/infrastructure-gcp/cluster-template-ci-with-internal-lb.yaml b/test/e2e/data/infrastructure-gcp/cluster-template-ci-with-internal-lb.yaml
index 95048c8bc..6377a022b 100644
--- a/test/e2e/data/infrastructure-gcp/cluster-template-ci-with-internal-lb.yaml
+++ b/test/e2e/data/infrastructure-gcp/cluster-template-ci-with-internal-lb.yaml
@@ -8,6 +8,7 @@ metadata:
     ccm: "${CLUSTER_NAME}-crs-ccm"
 spec:
   clusterNetwork:
+    apiServerPort: 6443
     pods:
       cidrBlocks: ["192.168.0.0/16"]
   infrastructureRef:
@@ -33,8 +34,10 @@ spec:
         cidrBlock: "10.0.0.0/17"
         purpose: PRIVATE
         region: us-east4
+  controlPlaneEndpoint:
+    port: 6443
   loadBalancer:
-    loadBalancerType: InternalExternal
+    loadBalancerType: Internal
 ---
 apiVersion: controlplane.cluster.x-k8s.io/v1beta1
 kind: KubeadmControlPlane

diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go
index 3e1823a27..20c7ccdff 100644
--- a/test/e2e/e2e_test.go
+++ b/test/e2e/e2e_test.go
@@ -35,19 +35,19 @@ import (
 
 var _ = Describe("Workload cluster creation", func() {
	var (
-		ctx                 = context.TODO()
-		specName            = "create-workload-cluster"
-		namespace           *corev1.Namespace
-		cancelWatches       context.CancelFunc
-		result              *clusterctl.ApplyClusterTemplateAndWaitResult
-		clusterNamePrefix   string
-		clusterctlLogFolder string
+		ctx                                         = context.TODO()
+		specName                                    = "create-workload-cluster"
+		namespace, gkeNamespace                     *corev1.Namespace
+		cancelWatches, gkeCancelWatches             context.CancelFunc
+		result                                      *clusterctl.ApplyClusterTemplateAndWaitResult
+		clusterNamePrefix                           string
+		clusterctlLogFolder, gkeClusterctlLogFolder string
	)
 
	BeforeEach(func() {
		Expect(e2eConfig).ToNot(BeNil(), "Invalid argument. e2eConfig can't be nil when calling %s spec", specName)
		Expect(clusterctlConfigPath).To(BeAnExistingFile(), "Invalid argument. clusterctlConfigPath must be an existing file when calling %s spec", specName)
		Expect(bootstrapClusterProxy).ToNot(BeNil(), "Invalid argument. bootstrapClusterProxy can't be nil when calling %s spec", specName)
		Expect(os.MkdirAll(artifactFolder, 0o755)).To(Succeed(), "Invalid argument. artifactFolder can't be created for %s spec", specName)
 
		Expect(e2eConfig.Variables).To(HaveKey(KubernetesVersion))
@@ -57,27 +57,49 @@ var _ = Describe("Workload cluster creation", func() {
		// Setup a Namespace where to host objects for this spec and create a watcher for the namespace events.
		namespace, cancelWatches = setupSpecNamespace(ctx, specName, bootstrapClusterProxy, artifactFolder)
 
		result = new(clusterctl.ApplyClusterTemplateAndWaitResult)
 
		// We need to override clusterctl apply log folder to avoid getting our credentials exposed.
		clusterctlLogFolder = filepath.Join(os.TempDir(), "clusters", bootstrapClusterProxy.GetName())
+
+		// The GKE bootstrap cluster is optional; set up its namespace and log folder only when it exists.
+		if bootstrapGKEClusterProxy != nil {
+			gkeNamespace, gkeCancelWatches = setupSpecNamespace(ctx, specName, bootstrapGKEClusterProxy, artifactFolder)
+			gkeClusterctlLogFolder = filepath.Join(os.TempDir(), "clusters", bootstrapGKEClusterProxy.GetName())
+		}
	})
 
	AfterEach(func() {
-		cleanInput := cleanupInput{
-			SpecName:             specName,
-			Cluster:              result.Cluster,
-			ClusterProxy:         bootstrapClusterProxy,
-			ClusterctlConfigPath: clusterctlConfigPath,
-			Namespace:            namespace,
-			CancelWatches:        cancelWatches,
-			IntervalsGetter:      e2eConfig.GetIntervals,
-			SkipCleanup:          skipCleanup,
-			ArtifactFolder:       artifactFolder,
+		cleanInputs := []cleanupInput{
+			{
+				SpecName:             specName,
+				Cluster:              result.Cluster,
+				ClusterProxy:         bootstrapClusterProxy,
+				ClusterctlConfigPath: clusterctlConfigPath,
+				Namespace:            namespace,
+				CancelWatches:        cancelWatches,
+				IntervalsGetter:      e2eConfig.GetIntervals,
+				SkipCleanup:          skipCleanup,
+				ArtifactFolder:       artifactFolder,
+			},
+		}
+		if bootstrapGKEClusterProxy != nil {
+			cleanInputs = append(cleanInputs, cleanupInput{
+				SpecName:             specName,
+				Cluster:              result.Cluster,
+				ClusterProxy:         bootstrapGKEClusterProxy,
+				ClusterctlConfigPath: clusterctlConfigPath,
+				Namespace:            gkeNamespace,
+				CancelWatches:        gkeCancelWatches,
+				IntervalsGetter:      e2eConfig.GetIntervals,
+				SkipCleanup:          skipCleanup,
+				ArtifactFolder:       artifactFolder,
+			})
+		}
+		for _, cleanInput := range cleanInputs {
+			dumpSpecResourcesAndCleanup(ctx, cleanInput)
		}
-
-		dumpSpecResourcesAndCleanup(ctx, cleanInput)
	})
 
	Context("Creating a single control-plane cluster", func() {
@@ -188,8 +210,8 @@ var _ = Describe("Workload cluster creation", func() {
		})
	})
 
-	Context("Creating a control-plane cluster with an internal load balancer", func() {
-		It("Should create a cluster with 1 control-plane and 1 worker node with an internal load balancer", func() {
+	Context("Creating a control-plane cluster with an external and an internal load balancer", func() {
+		It("Should create a cluster with 1 control-plane and 1 worker node with an external and an internal load balancer", func() {
			clusterName := fmt.Sprintf("%s-internal-lb", clusterNamePrefix)
			By("Creating a cluster with internal load balancer")
			clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{
@@ -199,7 +221,7 @@ var _ = Describe("Workload cluster creation", func() {
				ClusterctlConfigPath:     clusterctlConfigPath,
				KubeconfigPath:           bootstrapClusterProxy.GetKubeconfigPath(),
				InfrastructureProvider:   clusterctl.DefaultInfrastructureProvider,
-				Flavor:                   "ci-with-internal-lb",
+				Flavor:                   "ci-with-external-and-internal-lb",
				Namespace:                namespace.Name,
				ClusterName:              clusterName,
				KubernetesVersion:        e2eConfig.MustGetVariable(KubernetesVersion),
@@ -213,6 +235,36 @@ var _ = Describe("Workload cluster creation", func() {
		})
	})
 
+	Context("Creating a control-plane cluster with an internal load balancer", func() {
+		It("Should create a cluster with 1 control-plane and 1 worker node with an internal load balancer", func() {
+			// This test requires a GKE bootstrap cluster.
+			if bootstrapGKEClusterProxy == nil {
+				Skip("test requires a GKE bootstrap cluster, GKE_BOOTSTRAP_KUBECONFIG is not set")
+			}
+
+			clusterName := fmt.Sprintf("%s-int-lb", clusterNamePrefix)
+			By("Creating a cluster with internal load balancer from GKE bootstrap cluster")
+			clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{
+				ClusterProxy: bootstrapGKEClusterProxy,
+				ConfigCluster: clusterctl.ConfigClusterInput{
+					LogFolder:                gkeClusterctlLogFolder,
+					ClusterctlConfigPath:     clusterctlConfigPath,
+					KubeconfigPath:           bootstrapGKEClusterProxy.GetKubeconfigPath(),
+					InfrastructureProvider:   clusterctl.DefaultInfrastructureProvider,
+					Flavor:                   "ci-with-internal-lb",
+					Namespace:                gkeNamespace.Name,
+					ClusterName:              clusterName,
+					KubernetesVersion:        e2eConfig.MustGetVariable(KubernetesVersion),
+					ControlPlaneMachineCount: ptr.To[int64](1),
+					WorkerMachineCount:       ptr.To[int64](1),
+				},
+				WaitForClusterIntervals:      e2eConfig.GetIntervals(specName, "wait-cluster"),
+				WaitForControlPlaneIntervals: e2eConfig.GetIntervals(specName, "wait-control-plane"),
+				WaitForMachineDeployments:    e2eConfig.GetIntervals(specName, "wait-worker-nodes"),
+			}, result)
+		})
+	})
+
	Context("Creating a cluster using a cluster class", func() {
		It("Should create a cluster class and then a cluster based on it", func() {
			clusterName := fmt.Sprintf("%s-topology", clusterNamePrefix)

diff --git a/test/e2e/suite_test.go b/test/e2e/suite_test.go
index a205785d5..8dacf5c79 100644
--- a/test/e2e/suite_test.go
+++ b/test/e2e/suite_test.go
@@ -87,6 +87,10 @@ var (
	// bootstrapClusterProxy allows to interact with the bootstrap cluster to be used for the e2e tests.
	bootstrapClusterProxy framework.ClusterProxy
 
+	// bootstrapGKEClusterProxy allows to interact with a GKE cluster to be used as the bootstrap cluster
+	// for creating clusters with only internal load balancers.
+	bootstrapGKEClusterProxy framework.ClusterProxy
+
	// kubetestConfigFilePath is the path to the kubetest configuration file
	kubetestConfigFilePath string
 
@@ -152,29 +156,48 @@ var _ = SynchronizedBeforeSuite(func() []byte {
	By("Setting up the bootstrap cluster")
	bootstrapClusterProvider, bootstrapClusterProxy = setupBootstrapCluster(e2eConfig, scheme, useExistingCluster)
 
+	// If a GKE bootstrap cluster is defined, set it up.
+	gkeBootstrapKubeconfig := os.Getenv("GKE_BOOTSTRAP_KUBECONFIG")
+	if gkeBootstrapKubeconfig != "" {
+		By("Setting up the GKE bootstrap cluster")
+		bootstrapGKEClusterProxy = framework.NewClusterProxy("bootstrap-gke", gkeBootstrapKubeconfig, scheme)
+	}
+
	By("Initializing the bootstrap cluster")
	initBootstrapCluster(bootstrapClusterProxy, e2eConfig, clusterctlConfigPath, artifactFolder)
 
+	// If a GKE bootstrap cluster is defined, initialize it for management.
+	if bootstrapGKEClusterProxy != nil {
+		By("Initializing the GKE bootstrap cluster")
+		initBootstrapCluster(bootstrapGKEClusterProxy, e2eConfig, clusterctlConfigPath, artifactFolder)
+	}
+
	return []byte(
		strings.Join([]string{
			artifactFolder,
			configPath,
			clusterctlConfigPath,
			bootstrapClusterProxy.GetKubeconfigPath(),
+			gkeBootstrapKubeconfig,
		}, ","),
	)
 }, func(data []byte) {
	// Before each ParallelNode.
	parts := strings.Split(string(data), ",")
-	Expect(parts).To(HaveLen(4))
+	Expect(parts).To(HaveLen(5))
 
	artifactFolder = parts[0]
	configPath = parts[1]
	clusterctlConfigPath = parts[2]
	kubeconfigPath := parts[3]
+	gkeKubeconfigPath := parts[4]
 
	e2eConfig = loadE2EConfig(configPath)
	bootstrapClusterProxy = framework.NewClusterProxy("bootstrap", kubeconfigPath, initScheme())
+
+	if gkeKubeconfigPath != "" {
+		bootstrapGKEClusterProxy = framework.NewClusterProxy("bootstrap-gke", gkeKubeconfigPath, initScheme())
+	}
 })
 
 // Using a SynchronizedAfterSuite for controlling how to delete resources shared across ParallelNodes (~ginkgo threads).
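
Reviewer note, not part of the patch: a minimal sketch of how the new internal-LB spec can be exercised against a pre-existing GKE bootstrap cluster, assuming GCP_PROJECT, GCP_REGION, TEST_NAME, and the variables consumed by test/e2e/config/gcp-ci.yaml are exported the same way scripts/ci-e2e.sh exports them. The kubeconfig path below is a placeholder.

  # Reuse an existing GKE cluster as the bootstrap cluster; ci-e2e.sh skips
  # create_gke_bootstrap_cluster whenever SKIP_INIT_GKE_BOOTSTRAP is non-empty.
  export SKIP_INIT_GKE_BOOTSTRAP=1
  export GKE_BOOTSTRAP_KUBECONFIG="${HOME}/.kube/gke-bootstrap.kubeconfig"  # placeholder

  # Build the controller image, then tag and push it so both bootstrap
  # clusters can pull it (this mirrors init_controller_image above).
  make e2e-image
  export CONTROLLER_IMAGE="gcr.io/${GCP_PROJECT}/cluster-api-gcp-controller:${TEST_NAME}"
  docker tag gcr.io/k8s-staging-cluster-api-gcp/cluster-api-gcp-controller:e2e "${CONTROLLER_IMAGE}"
  docker push "${CONTROLLER_IMAGE}"

  # Run only the internal-LB spec. GINKGO_FOCUS feeds --focus in the
  # test-e2e-run Makefile target; the regex deliberately excludes the
  # "external and an internal" spec, which also mentions an internal LB.
  GINKGO_FOCUS="cluster with an internal load balancer" make test-e2e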
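A quick post-run spot check, also illustrative: a cluster created from the Internal flavor should leave no EXTERNAL forwarding rule for the API server. The name filter assumes the provider derives forwarding-rule names from the cluster name; adjust it if that assumption does not hold.

  # Expect only loadBalancingScheme INTERNAL entries for the cluster's
  # control plane; an EXTERNAL entry would mean the external LB was built too.
  gcloud compute forwarding-rules list --project "${GCP_PROJECT}" \
    --filter="name~${CLUSTER_NAME}" \
    --format="table(name,region,loadBalancingScheme,IPAddress)"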