diff --git a/examples/capi-quick-start/nutanix-cluster-calico-crs.yaml b/examples/capi-quick-start/nutanix-cluster-calico-crs.yaml index 522cc69fa..2212645ed 100644 --- a/examples/capi-quick-start/nutanix-cluster-calico-crs.yaml +++ b/examples/capi-quick-start/nutanix-cluster-calico-crs.yaml @@ -95,10 +95,8 @@ spec: serviceLoadBalancer: configuration: addressRanges: - - end: 198.18.1.10 - start: 198.18.1.1 - - end: 198.18.1.30 - start: 198.18.1.21 + - end: ${KUBERNETES_SERVICE_LOAD_BALANCER_IP} + start: ${KUBERNETES_SERVICE_LOAD_BALANCER_IP} provider: MetalLB controlPlane: nutanix: diff --git a/examples/capi-quick-start/nutanix-cluster-calico-helm-addon.yaml b/examples/capi-quick-start/nutanix-cluster-calico-helm-addon.yaml index 2e6fcdb37..482dd0a1a 100644 --- a/examples/capi-quick-start/nutanix-cluster-calico-helm-addon.yaml +++ b/examples/capi-quick-start/nutanix-cluster-calico-helm-addon.yaml @@ -92,10 +92,8 @@ spec: serviceLoadBalancer: configuration: addressRanges: - - end: 198.18.1.10 - start: 198.18.1.1 - - end: 198.18.1.30 - start: 198.18.1.21 + - end: ${KUBERNETES_SERVICE_LOAD_BALANCER_IP} + start: ${KUBERNETES_SERVICE_LOAD_BALANCER_IP} provider: MetalLB controlPlane: nutanix: diff --git a/examples/capi-quick-start/nutanix-cluster-cilium-crs.yaml b/examples/capi-quick-start/nutanix-cluster-cilium-crs.yaml index ae467b730..9ee987eec 100644 --- a/examples/capi-quick-start/nutanix-cluster-cilium-crs.yaml +++ b/examples/capi-quick-start/nutanix-cluster-cilium-crs.yaml @@ -95,10 +95,8 @@ spec: serviceLoadBalancer: configuration: addressRanges: - - end: 198.18.1.10 - start: 198.18.1.1 - - end: 198.18.1.30 - start: 198.18.1.21 + - end: ${KUBERNETES_SERVICE_LOAD_BALANCER_IP} + start: ${KUBERNETES_SERVICE_LOAD_BALANCER_IP} provider: MetalLB controlPlane: nutanix: diff --git a/examples/capi-quick-start/nutanix-cluster-cilium-helm-addon.yaml b/examples/capi-quick-start/nutanix-cluster-cilium-helm-addon.yaml index 0011d4af0..203b9df81 100644 --- 
a/examples/capi-quick-start/nutanix-cluster-cilium-helm-addon.yaml +++ b/examples/capi-quick-start/nutanix-cluster-cilium-helm-addon.yaml @@ -94,10 +94,8 @@ spec: serviceLoadBalancer: configuration: addressRanges: - - end: 198.18.1.10 - start: 198.18.1.1 - - end: 198.18.1.30 - start: 198.18.1.21 + - end: ${KUBERNETES_SERVICE_LOAD_BALANCER_IP} + start: ${KUBERNETES_SERVICE_LOAD_BALANCER_IP} provider: MetalLB controlPlane: nutanix: diff --git a/examples/capi-quick-start/nutanix-cluster-with-failuredomains-cilium-crs.yaml b/examples/capi-quick-start/nutanix-cluster-with-failuredomains-cilium-crs.yaml index 37002d5c0..7e9152a2d 100644 --- a/examples/capi-quick-start/nutanix-cluster-with-failuredomains-cilium-crs.yaml +++ b/examples/capi-quick-start/nutanix-cluster-with-failuredomains-cilium-crs.yaml @@ -131,10 +131,8 @@ spec: serviceLoadBalancer: configuration: addressRanges: - - end: 198.18.1.10 - start: 198.18.1.1 - - end: 198.18.1.30 - start: 198.18.1.21 + - end: ${KUBERNETES_SERVICE_LOAD_BALANCER_IP} + start: ${KUBERNETES_SERVICE_LOAD_BALANCER_IP} provider: MetalLB controlPlane: nutanix: diff --git a/examples/capi-quick-start/nutanix-cluster-with-failuredomains-cilium-helm-addon.yaml b/examples/capi-quick-start/nutanix-cluster-with-failuredomains-cilium-helm-addon.yaml index 62b958d4f..7bb37b1d1 100644 --- a/examples/capi-quick-start/nutanix-cluster-with-failuredomains-cilium-helm-addon.yaml +++ b/examples/capi-quick-start/nutanix-cluster-with-failuredomains-cilium-helm-addon.yaml @@ -130,10 +130,8 @@ spec: serviceLoadBalancer: configuration: addressRanges: - - end: 198.18.1.10 - start: 198.18.1.1 - - end: 198.18.1.30 - start: 198.18.1.21 + - end: ${KUBERNETES_SERVICE_LOAD_BALANCER_IP} + start: ${KUBERNETES_SERVICE_LOAD_BALANCER_IP} provider: MetalLB controlPlane: nutanix: diff --git a/hack/examples/bases/docker/cluster/kustomization.yaml.tmpl b/hack/examples/bases/docker/cluster/kustomization.yaml.tmpl index 19814c625..4528b8787 100644 --- 
a/hack/examples/bases/docker/cluster/kustomization.yaml.tmpl +++ b/hack/examples/bases/docker/cluster/kustomization.yaml.tmpl @@ -52,4 +52,4 @@ patches: # Deploy ServiceLoadBalancer MetalLB - target: kind: Cluster - path: ../../../patches/metallb.yaml + path: ../../../patches/docker/metallb.yaml diff --git a/hack/examples/bases/nutanix/cluster/kustomization.yaml.tmpl b/hack/examples/bases/nutanix/cluster/kustomization.yaml.tmpl index 1d35a4502..207dc1d1d 100644 --- a/hack/examples/bases/nutanix/cluster/kustomization.yaml.tmpl +++ b/hack/examples/bases/nutanix/cluster/kustomization.yaml.tmpl @@ -87,4 +87,4 @@ patches: # Deploy ServiceLoadBalancer MetalLB - target: kind: Cluster - path: ../../../patches/metallb.yaml + path: ../../../patches/nutanix/metallb.yaml diff --git a/hack/examples/patches/metallb.yaml b/hack/examples/patches/docker/metallb.yaml similarity index 100% rename from hack/examples/patches/metallb.yaml rename to hack/examples/patches/docker/metallb.yaml diff --git a/hack/examples/patches/nutanix/metallb.yaml b/hack/examples/patches/nutanix/metallb.yaml new file mode 100644 index 000000000..c79767ba8 --- /dev/null +++ b/hack/examples/patches/nutanix/metallb.yaml @@ -0,0 +1,11 @@ +# Copyright 2024 Nutanix. All rights reserved. 
+# SPDX-License-Identifier: Apache-2.0 + +- op: "add" + path: "/spec/topology/variables/0/value/addons/serviceLoadBalancer" + value: + provider: MetalLB + configuration: + addressRanges: + - start: ${KUBERNETES_SERVICE_LOAD_BALANCER_IP} + end: ${KUBERNETES_SERVICE_LOAD_BALANCER_IP} diff --git a/test/e2e/config/caren.yaml b/test/e2e/config/caren.yaml index 15c883479..ac0444e89 100644 --- a/test/e2e/config/caren.yaml +++ b/test/e2e/config/caren.yaml @@ -226,6 +226,7 @@ intervals: default/wait-deployment: ["10m", "10s"] default/wait-daemonset: [ "5m", "10s" ] default/wait-statefulset: [ "10m", "10s" ] + default/wait-service: [ "10m", "10s" ] default/wait-clusterresourceset: [ "5m", "10s" ] default/wait-helmrelease: [ "5m", "10s" ] default/wait-resource: [ "5m", "10s" ] diff --git a/test/e2e/quick_start_test.go b/test/e2e/quick_start_test.go index 6b66d52f7..cd23a8f96 100644 --- a/test/e2e/quick_start_test.go +++ b/test/e2e/quick_start_test.go @@ -96,27 +96,43 @@ var _ = Describe("Quick start", func() { ) } - // For Nutanix provider, reserve an IP address for the workload cluster control plane endpoint - - // remember to unreserve it! + // For Nutanix provider, reserve an IP address for the workload cluster: + // 1. control plane endpoint + // 2. service load balancer + // Remember to unreserve it after the test! 
if provider == "Nutanix" { - By( - "Reserving an IP address for the workload cluster control plane endpoint", - ) nutanixClient, err := nutanix.NewV4Client( nutanix.CredentialsFromCAPIE2EConfig(testE2EConfig), ) Expect(err).ToNot(HaveOccurred()) + subnetName := testE2EConfig.MustGetVariable("NUTANIX_SUBNET_NAME") + prismElementClusterName := testE2EConfig.MustGetVariable( + "NUTANIX_PRISM_ELEMENT_CLUSTER_NAME", + ) + By( + "Reserving an IP address for the workload cluster control plane endpoint", + ) controlPlaneEndpointIP, unreserveControlPlaneEndpointIP, err := nutanix.ReserveIP( - testE2EConfig.MustGetVariable("NUTANIX_SUBNET_NAME"), - testE2EConfig.MustGetVariable( - "NUTANIX_PRISM_ELEMENT_CLUSTER_NAME", - ), + subnetName, + prismElementClusterName, nutanixClient, ) Expect(err).ToNot(HaveOccurred()) DeferCleanup(unreserveControlPlaneEndpointIP) testE2EConfig.Variables["CONTROL_PLANE_ENDPOINT_IP"] = controlPlaneEndpointIP + + By( + "Reserving an IP address for the workload cluster kubernetes Service load balancer", + ) + kubernetesServiceLoadBalancerIP, unreservekubernetesServiceLoadBalancerIP, err := nutanix.ReserveIP( + subnetName, + prismElementClusterName, + nutanixClient, + ) + Expect(err).ToNot(HaveOccurred()) + DeferCleanup(unreservekubernetesServiceLoadBalancerIP) + testE2EConfig.Variables["KUBERNETES_SERVICE_LOAD_BALANCER_IP"] = kubernetesServiceLoadBalancerIP } clusterLocalTempDir, err := os.MkdirTemp("", "clusterctl-") @@ -326,7 +342,6 @@ var _ = Describe("Quick start", func() { ClusterProxy: proxy, }, ) - EnsureAntiAffnityForRegistryAddon( ctx, EnsureAntiAffnityForRegistryAddonInput{ @@ -335,6 +350,21 @@ var _ = Describe("Quick start", func() { ClusterProxy: proxy, }, ) + + // TODO: Test for other providers. 
+ if provider == "Nutanix" { + EnsureLoadBalancerService( + ctx, + EnsureLoadBalancerServiceInput{ + WorkloadCluster: workloadCluster, + ClusterProxy: proxy, + ServiceIntervals: testE2EConfig.GetIntervals( + flavor, + "wait-service", + ), + }, + ) + } }, } }) diff --git a/test/e2e/serviceloadbalancer_helpers.go b/test/e2e/serviceloadbalancer_helpers.go index ff43acf62..6939026d0 100644 --- a/test/e2e/serviceloadbalancer_helpers.go +++ b/test/e2e/serviceloadbalancer_helpers.go @@ -8,11 +8,19 @@ package e2e import ( "context" "fmt" + "io" + "net/http" + "net/url" + "strings" + "time" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/utils/ptr" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/test/framework" "sigs.k8s.io/controller-runtime/pkg/client" @@ -136,3 +144,154 @@ func waitForMetalLBServiceLoadBalancerToBeReadyInWorkloadCluster( Resources: resources, }, input.resourceIntervals...) } + +type EnsureLoadBalancerServiceInput struct { + WorkloadCluster *clusterv1.Cluster + ClusterProxy framework.ClusterProxy + ServiceIntervals []interface{} +} + +// EnsureLoadBalancerService creates a test Service of type LoadBalancer and tests that the assigned IP responds. 
+func EnsureLoadBalancerService( + ctx context.Context, + input EnsureLoadBalancerServiceInput, +) { + workloadClusterClient := input.ClusterProxy.GetWorkloadCluster( + ctx, input.WorkloadCluster.Namespace, input.WorkloadCluster.Name, + ).GetClient() + + svc := createTestService(ctx, workloadClusterClient, input.ServiceIntervals) + + By("Testing the LoadBalancer Service responds") + getClientIPURL := &url.URL{ + Scheme: "http", + Host: getLoadBalancerAddress(svc), + Path: "/clientip", + } + output := testServiceLoadBalancer(ctx, getClientIPURL, input.ServiceIntervals) + Expect(output).ToNot(BeEmpty()) +} + +func createTestService( + ctx context.Context, + workloadClusterClient client.Client, + intervals []interface{}, +) *corev1.Service { + const ( + name = "echo" + namespace = corev1.NamespaceDefault + appKey = "app" + replicas = int32(1) + image = "registry.k8s.io/e2e-test-images/agnhost:2.57" + port = 8080 + portName = "http" + ) + + By("Creating a test Deployment for LoadBalancer Service") + deployment := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: ptr.To(replicas), + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{appKey: name}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{appKey: name}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: name, + Image: image, + Args: []string{"netexec", fmt.Sprintf("--http-port=%d", port)}, + Ports: []corev1.ContainerPort{{ + Name: portName, + ContainerPort: int32(port), + }}, + }}, + }, + }, + }, + } + if err := workloadClusterClient.Create(ctx, deployment); err != nil { + Expect(err).ToNot(HaveOccurred()) + } + By("Waiting for Deployment to be ready") + Eventually(func(g Gomega) { + g.Expect(workloadClusterClient.Get(ctx, client.ObjectKeyFromObject(deployment), deployment)).To(Succeed()) + 
g.Expect(deployment.Status.ReadyReplicas).To(Equal(replicas)) + }, intervals...).Should(Succeed(), "timed out waiting for Deployment to be ready") + + By("Creating a test Service for LoadBalancer Service") + service := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeLoadBalancer, + Selector: map[string]string{appKey: name}, + Ports: []corev1.ServicePort{{ + Name: portName, + Port: 80, + Protocol: corev1.ProtocolTCP, + TargetPort: intstr.FromInt(port), + }}, + }, + } + if err := workloadClusterClient.Create(ctx, service); err != nil { + Expect(err).ToNot(HaveOccurred()) + } + By("Waiting for LoadBalancer IP/Hostname to be assigned") + Eventually(func(g Gomega) { + g.Expect(workloadClusterClient.Get(ctx, client.ObjectKeyFromObject(service), service)).To(Succeed()) + + ingress := service.Status.LoadBalancer.Ingress + g.Expect(ingress).ToNot(BeEmpty(), "no LoadBalancer ingress yet") + + ip := ingress[0].IP + hostname := ingress[0].Hostname + g.Expect(ip == "" && hostname == "").To(BeFalse(), "ingress has neither IP nor Hostname yet") + }, intervals...).Should(Succeed(), "timed out waiting for LoadBalancer IP/hostname") + + return service +} + +func getLoadBalancerAddress(svc *corev1.Service) string { + ings := svc.Status.LoadBalancer.Ingress + if len(ings) == 0 { + return "" + } + address := ings[0].IP + if address == "" { + address = ings[0].Hostname + } + return address +} + +func testServiceLoadBalancer( + ctx context.Context, + requestURL *url.URL, + intervals []interface{}, +) string { + hc := &http.Client{Timeout: 5 * time.Second} + var output string + Eventually(func(g Gomega) string { + req, _ := http.NewRequestWithContext(ctx, http.MethodGet, requestURL.String(), http.NoBody) + resp, err := hc.Do(req) + if err != nil { + return "" + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return "" + } + b, _ := io.ReadAll(resp.Body) + output =
strings.TrimSpace(string(b)) + return output + }, intervals...).ShouldNot(BeEmpty(), "no response from service") + return output +}