Commit 392f7d6

Merge pull request #4811 from nojnhuh/v2-e2e-machinepool-scale
add e2e test for AzureASOManagedMachinePool scaling
2 parents e6e6f5b + ff6d78e · commit 392f7d6

File tree: 2 files changed (+51 −12)


test/e2e/aks_machinepools.go

Lines changed: 41 additions & 12 deletions
@@ -23,11 +23,14 @@ import (
 	"context"
 	"sync"
 
+	asocontainerservicev1 "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20231001"
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/utils/ptr"
 	infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1"
+	infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1alpha1"
+	"sigs.k8s.io/cluster-api-provider-azure/exp/mutators"
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
 	expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
 	"sigs.k8s.io/cluster-api/test/framework"
@@ -46,7 +49,7 @@ func AKSMachinePoolSpec(ctx context.Context, inputGetter func() AKSMachinePoolSp
 
 	originalReplicas := map[types.NamespacedName]int32{}
 	for _, mp := range input.MachinePools {
-		originalReplicas[client.ObjectKeyFromObject(mp)] = ptr.Deref[int32](mp.Spec.Replicas, 0)
+		originalReplicas[client.ObjectKeyFromObject(mp)] = ptr.Deref(mp.Spec.Replicas, 0)
 	}
 
 	By("Scaling the machine pools out")
@@ -58,7 +61,7 @@ func AKSMachinePoolSpec(ctx context.Context, inputGetter func() AKSMachinePoolSp
 		framework.ScaleMachinePoolAndWait(ctx, framework.ScaleMachinePoolAndWaitInput{
 			ClusterProxy: bootstrapClusterProxy,
 			Cluster: input.Cluster,
-			Replicas: ptr.Deref[int32](mp.Spec.Replicas, 0) + 1,
+			Replicas: ptr.Deref(mp.Spec.Replicas, 0) + 1,
 			MachinePools: []*expv1.MachinePool{mp},
 			WaitForMachinePoolToScale: input.WaitIntervals,
 		})
@@ -75,7 +78,7 @@ func AKSMachinePoolSpec(ctx context.Context, inputGetter func() AKSMachinePoolSp
 		framework.ScaleMachinePoolAndWait(ctx, framework.ScaleMachinePoolAndWaitInput{
 			ClusterProxy: bootstrapClusterProxy,
 			Cluster: input.Cluster,
-			Replicas: ptr.Deref[int32](mp.Spec.Replicas, 0) - 1,
+			Replicas: ptr.Deref(mp.Spec.Replicas, 0) - 1,
 			MachinePools: []*expv1.MachinePool{mp},
 			WaitForMachinePoolToScale: input.WaitIntervals,
 		})
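The hunks above drop the explicit [int32] type argument from ptr.Deref: Go's generic type inference determines the type parameter from the *int32 argument, so both forms behave identically. A minimal, standalone sketch (not part of the test code) illustrating the equivalence:

// Minimal sketch showing that ptr.Deref infers its type parameter
// from the pointer it is given, so the explicit [int32] is redundant.
package main

import (
	"fmt"

	"k8s.io/utils/ptr"
)

func main() {
	var replicas *int32 // e.g. MachinePool.Spec.Replicas, which may be nil

	// Explicit instantiation compiles but adds noise.
	fmt.Println(ptr.Deref[int32](replicas, 0)) // 0

	// Type inference gives the same result.
	replicas = ptr.To(int32(3))
	fmt.Println(ptr.Deref(replicas, 0)) // 3
}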
@@ -87,15 +90,41 @@ func AKSMachinePoolSpec(ctx context.Context, inputGetter func() AKSMachinePoolSp
 	// System node pools cannot be scaled to 0, so only include user node pools.
 	var machinePoolsToScale []*expv1.MachinePool
 	for _, mp := range input.MachinePools {
-		ammp := &infrav1.AzureManagedMachinePool{}
-		err := bootstrapClusterProxy.GetClient().Get(ctx, types.NamespacedName{
-			Namespace: mp.Spec.Template.Spec.InfrastructureRef.Namespace,
-			Name: mp.Spec.Template.Spec.InfrastructureRef.Name,
-		}, ammp)
-		Expect(err).NotTo(HaveOccurred())
-
-		if ammp.Spec.Mode != string(infrav1.NodePoolModeSystem) {
-			machinePoolsToScale = append(machinePoolsToScale, mp)
+		switch mp.Spec.Template.Spec.InfrastructureRef.Kind {
+		case infrav1.AzureManagedMachinePoolKind:
+			ammp := &infrav1.AzureManagedMachinePool{}
+			err := bootstrapClusterProxy.GetClient().Get(ctx, types.NamespacedName{
+				Namespace: mp.Spec.Template.Spec.InfrastructureRef.Namespace,
+				Name: mp.Spec.Template.Spec.InfrastructureRef.Name,
+			}, ammp)
+			Expect(err).NotTo(HaveOccurred())
+
+			if ammp.Spec.Mode != string(infrav1.NodePoolModeSystem) {
+				machinePoolsToScale = append(machinePoolsToScale, mp)
+			}
+		case infrav1exp.AzureASOManagedMachinePoolKind:
+			ammp := &infrav1exp.AzureASOManagedMachinePool{}
+			err := bootstrapClusterProxy.GetClient().Get(ctx, types.NamespacedName{
+				Namespace: mp.Spec.Template.Spec.InfrastructureRef.Namespace,
+				Name: mp.Spec.Template.Spec.InfrastructureRef.Name,
+			}, ammp)
+			Expect(err).NotTo(HaveOccurred())
+
+			resources, err := mutators.ToUnstructured(ctx, ammp.Spec.Resources)
+			Expect(err).NotTo(HaveOccurred())
+			for _, resource := range resources {
+				if resource.GetKind() != "ManagedClustersAgentPool" {
+					continue
+				}
+				// mode may not be set in spec. Get the ASO object and check in status.
+				resource.SetNamespace(ammp.Namespace)
+				agentPool := &asocontainerservicev1.ManagedClustersAgentPool{}
+				Expect(bootstrapClusterProxy.GetClient().Get(ctx, client.ObjectKeyFromObject(resource), agentPool)).To(Succeed())
+				if ptr.Deref(agentPool.Status.Mode, "") != asocontainerservicev1.AgentPoolMode_STATUS_System {
+					machinePoolsToScale = append(machinePoolsToScale, mp)
+				}
+				break
+			}
 		}
 	}

test/e2e/azure_test.go

Lines changed: 10 additions & 0 deletions
@@ -974,6 +974,16 @@ var _ = Describe("Workload cluster creation", func() {
 				WaitForControlPlaneMachinesReady: WaitForAKSControlPlaneReady,
 			}),
 		), result)
+
+		By("Exercising machine pools", func() {
+			AKSMachinePoolSpec(ctx, func() AKSMachinePoolSpecInput {
+				return AKSMachinePoolSpecInput{
+					Cluster: result.Cluster,
+					MachinePools: result.MachinePools,
+					WaitIntervals: e2eConfig.GetIntervals(specName, "wait-machine-pool-nodes"),
+				}
+			})
+		})
 	})
 })
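This new By step wires the machine pool scaling spec into the existing AKS workload cluster test. Inferred only from the fields set at this call site (the real definition is in test/e2e/aks_machinepools.go and is not shown in this diff), the input struct looks roughly like the sketch below; the field types are assumptions, not copied from the repository.

// Rough, assumed shape of AKSMachinePoolSpecInput based on this call site.
package sketch

import (
	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
)

type AKSMachinePoolSpecInput struct {
	Cluster       *clusterv1.Cluster   // workload cluster created earlier in the It block
	MachinePools  []*expv1.MachinePool // machine pools created with the cluster template
	WaitIntervals []interface{}        // e.g. e2eConfig.GetIntervals(specName, "wait-machine-pool-nodes")
}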
