
Commit 76537af

add e2e test for aks multitenancy
1 parent 95fe8d9 commit 76537af

File tree

10 files changed, +402 -10 lines


templates/cluster-template-aks-multi-tenancy.yaml

Lines changed: 0 additions & 2 deletions
@@ -60,7 +60,6 @@ spec:
         apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4
         kind: AzureManagedMachinePool
         name: agentpool0
-        namespace: default
       version: ${KUBERNETES_VERSION}
 ---
 apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4
@@ -90,7 +89,6 @@ spec:
         apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4
         kind: AzureManagedMachinePool
         name: agentpool1
-        namespace: default
       version: ${KUBERNETES_VERSION}
 ---
 apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4

templates/cluster-template-aks.yaml

Lines changed: 0 additions & 2 deletions
@@ -55,7 +55,6 @@ spec:
         apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4
         kind: AzureManagedMachinePool
         name: agentpool0
-        namespace: default
       version: ${KUBERNETES_VERSION}
 ---
 apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4
@@ -85,7 +84,6 @@ spec:
         apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4
         kind: AzureManagedMachinePool
         name: agentpool1
-        namespace: default
       version: ${KUBERNETES_VERSION}
 ---
 apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4

templates/flavors/aks/cluster-template.yaml

Lines changed: 0 additions & 2 deletions
@@ -64,7 +64,6 @@ spec:
         apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4
         kind: AzureManagedMachinePool
         name: "agentpool0"
-        namespace: default
       version: "${KUBERNETES_VERSION}"
 ---
 # The Azure-specific machine pool implementation drives the configuration of the
@@ -95,7 +94,6 @@ spec:
         apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4
         kind: AzureManagedMachinePool
         name: "agentpool1"
-        namespace: default
       version: "${KUBERNETES_VERSION}"
 ---
 # The infrastructure backing the second pool will use the same VM sku, but a larger OS disk.
Lines changed: 119 additions & 0 deletions
@@ -0,0 +1,119 @@
apiVersion: cluster.x-k8s.io/v1alpha4
kind: Cluster
metadata:
  name: ${CLUSTER_NAME}
  namespace: default
spec:
  clusterNetwork:
    services:
      cidrBlocks:
      - 192.168.0.0/16
  controlPlaneRef:
    apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4
    kind: AzureManagedControlPlane
    name: ${CLUSTER_NAME}
  infrastructureRef:
    apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4
    kind: AzureManagedCluster
    name: ${CLUSTER_NAME}
---
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4
kind: AzureManagedControlPlane
metadata:
  name: ${CLUSTER_NAME}
  namespace: default
spec:
  additionalTags:
    buildProvenance: ${BUILD_PROVENANCE}
    creationTimestamp: ${TIMESTAMP}
    jobName: ${JOB_NAME}
  defaultPoolRef:
    name: agentpool0
  identityRef:
    apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4
    kind: AzureClusterIdentity
    name: ${CLUSTER_IDENTITY_NAME}
    namespace: ${CLUSTER_IDENTITY_NAMESPACE}
  location: ${AZURE_LOCATION}
  resourceGroupName: ${AZURE_RESOURCE_GROUP:=${CLUSTER_NAME}}
  sshPublicKey: ${AZURE_SSH_PUBLIC_KEY_B64:=""}
  subscriptionID: ${AZURE_SUBSCRIPTION_ID}
  version: ${KUBERNETES_VERSION}
---
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4
kind: AzureManagedCluster
metadata:
  name: ${CLUSTER_NAME}
  namespace: default
---
apiVersion: cluster.x-k8s.io/v1alpha4
kind: MachinePool
metadata:
  name: agentpool0
  namespace: default
spec:
  clusterName: ${CLUSTER_NAME}
  replicas: ${WORKER_MACHINE_COUNT}
  template:
    metadata: {}
    spec:
      bootstrap:
        dataSecretName: ""
      clusterName: ${CLUSTER_NAME}
      infrastructureRef:
        apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4
        kind: AzureManagedMachinePool
        name: agentpool0
      version: ${KUBERNETES_VERSION}
---
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4
kind: AzureManagedMachinePool
metadata:
  name: agentpool0
  namespace: default
spec:
  osDiskSizeGB: 512
  sku: ${AZURE_NODE_MACHINE_TYPE}
---
apiVersion: cluster.x-k8s.io/v1alpha4
kind: MachinePool
metadata:
  name: agentpool1
  namespace: default
spec:
  clusterName: ${CLUSTER_NAME}
  replicas: ${WORKER_MACHINE_COUNT}
  template:
    metadata: {}
    spec:
      bootstrap:
        dataSecretName: ""
      clusterName: ${CLUSTER_NAME}
      infrastructureRef:
        apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4
        kind: AzureManagedMachinePool
        name: agentpool1
      version: ${KUBERNETES_VERSION}
---
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4
kind: AzureManagedMachinePool
metadata:
  name: agentpool1
  namespace: default
spec:
  osDiskSizeGB: 1024
  sku: ${AZURE_NODE_MACHINE_TYPE}
---
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4
kind: AzureClusterIdentity
metadata:
  name: ${CLUSTER_IDENTITY_NAME}
  namespace: default
spec:
  allowedNamespaces: {}
  clientID: ${AZURE_CLUSTER_IDENTITY_CLIENT_ID}
  clientSecret:
    name: ${AZURE_CLUSTER_IDENTITY_SECRET_NAME}
    namespace: ${AZURE_CLUSTER_IDENTITY_SECRET_NAMESPACE}
  tenantID: ${AZURE_TENANT_ID}
  type: ServicePrincipal
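
The AzureClusterIdentity above only references its service principal credential through spec.clientSecret; that Secret must already exist in the management cluster before the identity can be used. A minimal sketch of creating it with controller-runtime, not part of this commit, assuming the "clientSecret" data key from the documented CAPZ identity setup and a hypothetical AZURE_CLIENT_SECRET environment variable:

package e2e

import (
	"context"
	"os"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// createClusterIdentitySecret (hypothetical helper) creates the Secret that
// spec.clientSecret of the AzureClusterIdentity points at. The "clientSecret"
// data key and the AZURE_CLIENT_SECRET variable are assumptions based on the
// documented CAPZ setup, not part of this commit.
func createClusterIdentitySecret(ctx context.Context, c client.Client) error {
	secret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      os.Getenv("AZURE_CLUSTER_IDENTITY_SECRET_NAME"),
			Namespace: os.Getenv("AZURE_CLUSTER_IDENTITY_SECRET_NAMESPACE"),
		},
		Data: map[string][]byte{
			"clientSecret": []byte(os.Getenv("AZURE_CLIENT_SECRET")),
		},
	}
	return c.Create(ctx, secret)
}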
Lines changed: 9 additions & 0 deletions
@@ -0,0 +1,9 @@
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4
kind: AzureManagedControlPlane
metadata:
  name: ${CLUSTER_NAME}
spec:
  additionalTags:
    jobName: ${JOB_NAME}
    creationTimestamp: ${TIMESTAMP}
    buildProvenance: ${BUILD_PROVENANCE}
Lines changed: 7 additions & 0 deletions
@@ -0,0 +1,7 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: default
resources:
  - ../../../flavors/aks-multi-tenancy
patchesStrategicMerge:
  - ../patches/tags-aks.yaml

test/e2e/aks.go

Lines changed: 191 additions & 0 deletions
@@ -0,0 +1,191 @@
// +build e2e

/*
Copyright 2021 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package e2e

import (
	"context"

	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
	"k8s.io/apimachinery/pkg/types"
	infraexpv1 "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1alpha4"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4"
	clusterv1exp "sigs.k8s.io/cluster-api/exp/api/v1alpha4"
	"sigs.k8s.io/cluster-api/test/framework"
	"sigs.k8s.io/cluster-api/test/framework/clusterctl"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// WaitForControlPlaneInitialized waits for the Azure managed control plane to be initialized.
// It is invoked by the Cluster API e2e framework.
func WaitForControlPlaneInitialized(ctx context.Context, input clusterctl.ApplyClusterTemplateAndWaitInput, result *clusterctl.ApplyClusterTemplateAndWaitResult) {
	client := input.ClusterProxy.GetClient()
	DiscoverAndWaitForControlPlaneInitialized(ctx, DiscoverAndWaitForControlPlaneMachinesInput{
		Lister:  client,
		Getter:  client,
		Cluster: result.Cluster,
	}, input.WaitForControlPlaneIntervals...)
}

// WaitForControlPlaneMachinesReady waits for the Azure managed control plane to be ready.
// It is invoked by the Cluster API e2e framework.
func WaitForControlPlaneMachinesReady(ctx context.Context, input clusterctl.ApplyClusterTemplateAndWaitInput, result *clusterctl.ApplyClusterTemplateAndWaitResult) {
	client := input.ClusterProxy.GetClient()
	DiscoverAndWaitForControlPlaneReady(ctx, DiscoverAndWaitForControlPlaneMachinesInput{
		Lister:  client,
		Getter:  client,
		Cluster: result.Cluster,
	}, input.WaitForControlPlaneIntervals...)
}

// DiscoverAndWaitForControlPlaneMachinesInput contains the fields required for checking the status of the Azure managed control plane.
type DiscoverAndWaitForControlPlaneMachinesInput struct {
	Lister  framework.Lister
	Getter  framework.Getter
	Cluster *clusterv1.Cluster
}

// DiscoverAndWaitForControlPlaneInitialized gets the Azure managed control plane associated with the cluster
// and waits for at least one control plane machine to be up.
func DiscoverAndWaitForControlPlaneInitialized(ctx context.Context, input DiscoverAndWaitForControlPlaneMachinesInput, intervals ...interface{}) {
	gomega.Expect(ctx).NotTo(gomega.BeNil(), "ctx is required for DiscoverAndWaitForControlPlaneInitialized")
	gomega.Expect(input.Lister).ToNot(gomega.BeNil(), "Invalid argument. input.Lister can't be nil when calling DiscoverAndWaitForControlPlaneInitialized")
	gomega.Expect(input.Cluster).ToNot(gomega.BeNil(), "Invalid argument. input.Cluster can't be nil when calling DiscoverAndWaitForControlPlaneInitialized")

	controlPlane := GetAzureManagedControlPlaneByCluster(ctx, GetAzureManagedControlPlaneByClusterInput{
		Lister:      input.Lister,
		ClusterName: input.Cluster.Name,
		Namespace:   input.Cluster.Namespace,
	})
	gomega.Expect(controlPlane).ToNot(gomega.BeNil())

	Logf("Waiting for the first control plane machine managed by %s/%s to be provisioned", controlPlane.Namespace, controlPlane.Name)
	WaitForAtLeastOneControlPlaneAndMachineToExist(ctx, WaitForControlPlaneAndMachinesReadyInput{
		Getter:       input.Getter,
		ControlPlane: controlPlane,
		ClusterName:  input.Cluster.Name,
		Namespace:    input.Cluster.Namespace,
	}, intervals...)
}

// DiscoverAndWaitForControlPlaneReady gets the Azure managed control plane associated with the cluster
// and waits for all the control plane machines to be up.
func DiscoverAndWaitForControlPlaneReady(ctx context.Context, input DiscoverAndWaitForControlPlaneMachinesInput, intervals ...interface{}) {
	gomega.Expect(ctx).NotTo(gomega.BeNil(), "ctx is required for DiscoverAndWaitForControlPlaneReady")
	gomega.Expect(input.Lister).ToNot(gomega.BeNil(), "Invalid argument. input.Lister can't be nil when calling DiscoverAndWaitForControlPlaneReady")
	gomega.Expect(input.Cluster).ToNot(gomega.BeNil(), "Invalid argument. input.Cluster can't be nil when calling DiscoverAndWaitForControlPlaneReady")

	controlPlane := GetAzureManagedControlPlaneByCluster(ctx, GetAzureManagedControlPlaneByClusterInput{
		Lister:      input.Lister,
		ClusterName: input.Cluster.Name,
		Namespace:   input.Cluster.Namespace,
	})
	gomega.Expect(controlPlane).ToNot(gomega.BeNil())

	Logf("Waiting for all control plane machines managed by %s/%s to be provisioned", controlPlane.Namespace, controlPlane.Name)
	WaitForAllControlPlaneAndMachinesToExist(ctx, WaitForControlPlaneAndMachinesReadyInput{
		Getter:       input.Getter,
		ControlPlane: controlPlane,
		ClusterName:  input.Cluster.Name,
		Namespace:    input.Cluster.Namespace,
	}, intervals...)
}

// GetAzureManagedControlPlaneByClusterInput contains the fields required for fetching the Azure managed control plane.
type GetAzureManagedControlPlaneByClusterInput struct {
	Lister      framework.Lister
	ClusterName string
	Namespace   string
}

// GetAzureManagedControlPlaneByCluster returns the AzureManagedControlPlane object for a cluster.
// Important! This method relies on labels that are created by the CAPI controllers during the first reconciliation, so
// it is necessary to ensure this has already happened before calling it.
func GetAzureManagedControlPlaneByCluster(ctx context.Context, input GetAzureManagedControlPlaneByClusterInput) *infraexpv1.AzureManagedControlPlane {
	controlPlaneList := &infraexpv1.AzureManagedControlPlaneList{}
	gomega.Expect(input.Lister.List(ctx, controlPlaneList, byClusterOptions(input.ClusterName, input.Namespace)...)).To(gomega.Succeed(), "Failed to list AzureManagedControlPlane object for Cluster %s/%s", input.Namespace, input.ClusterName)
	gomega.Expect(len(controlPlaneList.Items)).ToNot(gomega.BeNumerically(">", 1), "Cluster %s/%s should not have more than 1 AzureManagedControlPlane object", input.Namespace, input.ClusterName)
	if len(controlPlaneList.Items) == 1 {
		return &controlPlaneList.Items[0]
	}
	return nil
}

// WaitForControlPlaneAndMachinesReadyInput contains the fields required for checking the status of Azure managed control plane machines.
type WaitForControlPlaneAndMachinesReadyInput struct {
	Getter       framework.Getter
	ControlPlane *infraexpv1.AzureManagedControlPlane
	ClusterName  string
	Namespace    string
}

// WaitForAtLeastOneControlPlaneAndMachineToExist waits for at least one control plane machine to be provisioned.
func WaitForAtLeastOneControlPlaneAndMachineToExist(ctx context.Context, input WaitForControlPlaneAndMachinesReadyInput, intervals ...interface{}) {
	ginkgo.By("Waiting for at least one control plane node to exist")
	WaitForControlPlaneMachinesToExist(ctx, input, atLeastOne, intervals...)
}

// WaitForAllControlPlaneAndMachinesToExist waits for all control plane machines to be provisioned.
func WaitForAllControlPlaneAndMachinesToExist(ctx context.Context, input WaitForControlPlaneAndMachinesReadyInput, intervals ...interface{}) {
	ginkgo.By("Waiting for all control plane nodes to exist")
	WaitForControlPlaneMachinesToExist(ctx, input, all, intervals...)
}

// controlPlaneReplicas represents the count of control plane machines.
type controlPlaneReplicas string

const (
	atLeastOne controlPlaneReplicas = "atLeastOne"
	all        controlPlaneReplicas = "all"
)

// value returns the integer equivalent of controlPlaneReplicas.
func (r controlPlaneReplicas) value(mp *clusterv1exp.MachinePool) int {
	switch r {
	case atLeastOne:
		return 1
	case all:
		return int(*mp.Spec.Replicas)
	}
	return 0
}

// WaitForControlPlaneMachinesToExist waits until at least the given number of control plane machines have node references.
func WaitForControlPlaneMachinesToExist(ctx context.Context, input WaitForControlPlaneAndMachinesReadyInput, minReplicas controlPlaneReplicas, intervals ...interface{}) {
	gomega.Eventually(func() (bool, error) {
		controlPlaneMachinePool := &clusterv1exp.MachinePool{}
		if err := input.Getter.Get(ctx, types.NamespacedName{Namespace: input.Namespace, Name: input.ControlPlane.Spec.DefaultPoolRef.Name},
			controlPlaneMachinePool); err != nil {
			Logf("Failed to get machinePool: %+v", err)
			return false, err
		}
		return len(controlPlaneMachinePool.Status.NodeRefs) >= minReplicas.value(controlPlaneMachinePool), nil
	}, intervals...).Should(gomega.Equal(true))
}

// byClusterOptions returns a set of ListOptions that allow identifying all the objects belonging to a Cluster.
func byClusterOptions(name, namespace string) []client.ListOption {
	return []client.ListOption{
		client.InNamespace(namespace),
		client.MatchingLabels{
			clusterv1.ClusterLabelName: name,
		},
	}
}
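
The functions above match the Waiter signature of the Cluster API e2e framework, so a test spec can substitute them for the default kubeadm control plane checks, which do not apply to a managed AKS control plane. A minimal sketch of that wiring, not part of this commit, assuming the ControlPlaneWaiters hook exposed by the v1alpha4 clusterctl test framework and a hypothetical applyAKSClusterTemplate helper:

// +build e2e

package e2e

import (
	"context"

	"sigs.k8s.io/cluster-api/test/framework/clusterctl"
)

// applyAKSClusterTemplate (hypothetical helper) shows how the waiters defined in
// aks.go would be handed to the framework: ControlPlaneWaiters overrides the
// default KubeadmControlPlane checks, which do not exist for a managed (AKS)
// control plane.
func applyAKSClusterTemplate(ctx context.Context, input clusterctl.ApplyClusterTemplateAndWaitInput, result *clusterctl.ApplyClusterTemplateAndWaitResult) {
	input.ControlPlaneWaiters = clusterctl.ControlPlaneWaiters{
		WaitForControlPlaneInitialized:   WaitForControlPlaneInitialized,
		WaitForControlPlaneMachinesReady: WaitForControlPlaneMachinesReady,
	}
	clusterctl.ApplyClusterTemplateAndWait(ctx, input, result)
}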
