Commit ca6ebb5

Merge pull request #3325 from helio/release-1.7-3250
[release-1.7] don't reduce replicas if ext. managed MachinePool
2 parents d4e2ca5 + b395572 commit ca6ebb5

2 files changed: +148 -4 lines changed
azure/scope/machinepool.go

Lines changed: 7 additions & 1 deletion
@@ -302,7 +302,7 @@ func (m *MachinePoolScope) applyAzureMachinePoolMachines(ctx context.Context) er
 			log.V(4).Info("deleting AzureMachinePoolMachine because it no longer exists in the VMSS", "providerID", key)
 			delete(existingMachinesByProviderID, key)
 			if err := m.client.Delete(ctx, &machine); err != nil {
-				return errors.Wrap(err, "failed deleting AzureMachinePoolMachine to reduce replica count")
+				return errors.Wrap(err, "failed deleting AzureMachinePoolMachine no longer existing in Azure")
 			}
 		}
 	}
@@ -321,6 +321,12 @@ func (m *MachinePoolScope) applyAzureMachinePoolMachines(ctx context.Context) er
 		return nil
 	}
 
+	// when replicas are externally managed, we do not want to scale down manually since that is handled by the external scaler.
+	if m.HasReplicasExternallyManaged(ctx) {
+		log.V(4).Info("exiting early due to replicas externally managed")
+		return nil
+	}
+
 	deleteSelector := m.getDeploymentStrategy()
 	if deleteSelector == nil {
 		log.V(4).Info("can not select AzureMachinePoolMachines to delete because no deployment strategy is specified")

azure/scope/machinepool_test.go

Lines changed: 141 additions & 3 deletions
@@ -31,6 +31,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/util/intstr"
+	"k8s.io/utils/pointer"
 	infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1"
 	"sigs.k8s.io/cluster-api-provider-azure/azure"
 	"sigs.k8s.io/cluster-api-provider-azure/azure/mock_azure"
@@ -39,6 +40,7 @@ import (
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
 	expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
 	"sigs.k8s.io/cluster-api/util/conditions"
+	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/client/fake"
 )
 
@@ -637,7 +639,7 @@ func TestMachinePoolScope_updateReplicasAndProviderIDs(t *testing.T) {
 			Verify: func(g *WithT, amp *infrav1exp.AzureMachinePool, err error) {
 				g.Expect(err).NotTo(HaveOccurred())
 				g.Expect(amp.Status.Replicas).To(BeEquivalentTo(3))
-				g.Expect(amp.Spec.ProviderIDList).To(ConsistOf("/foo/ampm0", "/foo/ampm1", "/foo/ampm2"))
+				g.Expect(amp.Spec.ProviderIDList).To(ConsistOf("azure://foo/ampm0", "azure://foo/ampm1", "azure://foo/ampm2"))
 			},
 		},
 		{
@@ -1043,6 +1045,7 @@ func TestMachinePoolScope_VMSSExtensionSpecs(t *testing.T) {
 
 func getReadyAzureMachinePoolMachines(count int32) []infrav1exp.AzureMachinePoolMachine {
 	machines := make([]infrav1exp.AzureMachinePoolMachine, count)
+	succeeded := infrav1.Succeeded
 	for i := 0; i < int(count); i++ {
 		machines[i] = infrav1exp.AzureMachinePoolMachine{
 			ObjectMeta: metav1.ObjectMeta{
@@ -1061,13 +1064,148 @@ func getReadyAzureMachinePoolMachines(count int32) []infrav1exp.AzureMachinePool
 				},
 			},
 			Spec: infrav1exp.AzureMachinePoolMachineSpec{
-				ProviderID: fmt.Sprintf("/foo/ampm%d", i),
+				ProviderID: fmt.Sprintf("azure://foo/ampm%d", i),
 			},
 			Status: infrav1exp.AzureMachinePoolMachineStatus{
-				Ready: true,
+				Ready:             true,
+				ProvisioningState: &succeeded,
 			},
 		}
 	}
 
 	return machines
 }
+
+func TestMachinePoolScope_applyAzureMachinePoolMachines(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	scheme := runtime.NewScheme()
+	_ = clusterv1.AddToScheme(scheme)
+	_ = infrav1exp.AddToScheme(scheme)
+
+	tests := []struct {
+		Name   string
+		Setup  func(mp *expv1.MachinePool, amp *infrav1exp.AzureMachinePool, vmssState *azure.VMSS, cb *fake.ClientBuilder)
+		Verify func(g *WithT, amp *infrav1exp.AzureMachinePool, c client.Client, err error)
+	}{
+		{
+			Name: "if MachinePool is externally managed and overProvisionCount > 0, do not try to reduce replicas",
+			Setup: func(mp *expv1.MachinePool, amp *infrav1exp.AzureMachinePool, vmssState *azure.VMSS, cb *fake.ClientBuilder) {
+				mp.Annotations = map[string]string{clusterv1.ReplicasManagedByAnnotation: "cluster-autoscaler"}
+				mp.Spec.Replicas = pointer.Int32(1)
+
+				for _, machine := range getReadyAzureMachinePoolMachines(2) {
+					obj := machine
+					cb.WithObjects(&obj)
+				}
+				vmssState.Instances = []azure.VMSSVM{
+					{
+						ID:   "foo/ampm0",
+						Name: "ampm0",
+					},
+					{
+						ID:   "foo/ampm1",
+						Name: "ampm1",
+					},
+				}
+			},
+			Verify: func(g *WithT, amp *infrav1exp.AzureMachinePool, c client.Client, err error) {
+				g.Expect(err).NotTo(HaveOccurred())
+				list := infrav1exp.AzureMachinePoolMachineList{}
+				g.Expect(c.List(ctx, &list)).NotTo(HaveOccurred())
+				g.Expect(len(list.Items)).Should(Equal(2))
+			},
+		},
+		{
+			Name: "if MachinePool is not externally managed and overProvisionCount > 0, reduce replicas",
+			Setup: func(mp *expv1.MachinePool, amp *infrav1exp.AzureMachinePool, vmssState *azure.VMSS, cb *fake.ClientBuilder) {
+				mp.Spec.Replicas = pointer.Int32(1)
+
+				for _, machine := range getReadyAzureMachinePoolMachines(2) {
+					obj := machine
+					cb.WithObjects(&obj)
+				}
+				vmssState.Instances = []azure.VMSSVM{
+					{
+						ID:   "foo/ampm0",
+						Name: "ampm0",
+					},
+					{
+						ID:   "foo/ampm1",
+						Name: "ampm1",
+					},
+				}
+			},
+			Verify: func(g *WithT, amp *infrav1exp.AzureMachinePool, c client.Client, err error) {
+				g.Expect(err).NotTo(HaveOccurred())
+				list := infrav1exp.AzureMachinePoolMachineList{}
+				g.Expect(c.List(ctx, &list)).NotTo(HaveOccurred())
+				g.Expect(len(list.Items)).Should(Equal(1))
+			},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.Name, func(t *testing.T) {
+			var (
+				g        = NewWithT(t)
+				mockCtrl = gomock.NewController(t)
+				cb       = fake.NewClientBuilder().WithScheme(scheme)
+				cluster  = &clusterv1.Cluster{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      "cluster1",
+						Namespace: "default",
+					},
+					Spec: clusterv1.ClusterSpec{
+						InfrastructureRef: &corev1.ObjectReference{
+							Name: "azCluster1",
+						},
+					},
+					Status: clusterv1.ClusterStatus{
+						InfrastructureReady: true,
+					},
+				}
+				mp = &expv1.MachinePool{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      "mp1",
+						Namespace: "default",
+						OwnerReferences: []metav1.OwnerReference{
+							{
+								Name:       "cluster1",
+								Kind:       "Cluster",
+								APIVersion: clusterv1.GroupVersion.String(),
+							},
+						},
+					},
+				}
+				amp = &infrav1exp.AzureMachinePool{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      "amp1",
+						Namespace: "default",
+						OwnerReferences: []metav1.OwnerReference{
+							{
+								Name:       "mp1",
+								Kind:       "MachinePool",
+								APIVersion: expv1.GroupVersion.String(),
+							},
+						},
+					},
+				}
+				vmssState = &azure.VMSS{}
+			)
+			defer mockCtrl.Finish()
+
+			tt.Setup(mp, amp, vmssState, cb.WithObjects(amp, cluster))
+			s := &MachinePoolScope{
+				client: cb.Build(),
+				ClusterScoper: &ClusterScope{
+					Cluster: cluster,
+				},
+				MachinePool:      mp,
+				AzureMachinePool: amp,
+				vmssState:        vmssState,
+			}
+			err := s.applyAzureMachinePoolMachines(ctx)
+			tt.Verify(g, s.AzureMachinePool, s.client, err)
		})
+	}
+}
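
Both new test cases run applyAzureMachinePoolMachines against a fake client holding two AzureMachinePoolMachines while mp.Spec.Replicas is 1: with the ReplicasManagedByAnnotation present, the scope must leave both machines in place because the external scaler owns the replica count; without it, the scope is expected to delete the surplus machine, leaving one. On a live cluster, the externally managed case corresponds to annotating the MachinePool, e.g. kubectl annotate machinepool mp1 cluster.x-k8s.io/replicas-managed-by=cluster-autoscaler (assuming the key behind clusterv1.ReplicasManagedByAnnotation resolves to cluster.x-k8s.io/replicas-managed-by; "mp1" is just the test fixture's pool name).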
