Skip to content

Commit e48ee1f

Browse files
committed
Correctly set maxUnavailable on the leader StatefulSet
1 parent d275e0a commit e48ee1f

File tree

6 files changed

+299
-7
lines changed

6 files changed

+299
-7
lines changed

pkg/controllers/leaderworkerset_controller.go

Lines changed: 26 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -166,7 +166,14 @@ func (r *LeaderWorkerSetReconciler) Reconcile(ctx context.Context, req ctrl.Requ
166166
r.Record.Eventf(lws, revision, corev1.EventTypeNormal, GroupsProgressing, Create, fmt.Sprintf("Created leader statefulset %s", lws.Name))
167167
} else if !lwsUpdated && partition != *leaderSts.Spec.UpdateStrategy.RollingUpdate.Partition {
168168
// An event is logged to track update progress.
169-
r.Record.Eventf(lws, revision, corev1.EventTypeNormal, GroupsUpdating, Update, fmt.Sprintf("Updating replicas %d to %d", *leaderSts.Spec.UpdateStrategy.RollingUpdate.Partition, partition))
169+
oldPartition := *leaderSts.Spec.UpdateStrategy.RollingUpdate.Partition
170+
var updateMsg string
171+
if oldPartition-1 == partition {
172+
updateMsg = fmt.Sprintf("Updating replica %d", partition)
173+
} else {
174+
updateMsg = fmt.Sprintf("Updating replicas %d to %d (inclusive)", partition, oldPartition-1)
175+
}
176+
r.Record.Eventf(lws, revision, corev1.EventTypeNormal, GroupsUpdating, Update, updateMsg)
170177
}
171178

172179
// Create headless service if it does not exist.
@@ -797,6 +804,23 @@ func constructLeaderStatefulSetApplyConfiguration(lws *leaderworkerset.LeaderWor
797804

798805
podTemplateApplyConfiguration.WithAnnotations(podAnnotations)
799806

807+
lwsMaxUnavailable, err := intstr.GetScaledValueFromIntOrPercent(&lws.Spec.RolloutStrategy.RollingUpdateConfiguration.MaxUnavailable, int(replicas), false)
808+
if err != nil {
809+
return nil, err
810+
}
811+
lwsMaxSurge, err := intstr.GetScaledValueFromIntOrPercent(&lws.Spec.RolloutStrategy.RollingUpdateConfiguration.MaxSurge, int(replicas), true)
812+
if err != nil {
813+
return nil, err
814+
}
815+
stsMaxUnavailableInt := int32(lwsMaxUnavailable + lwsMaxSurge)
816+
// lwsMaxUnavailable=0 and lwsMaxSurge=0 together should be blocked by webhook,
817+
// but just in case, we'll make sure that stsMaxUnavailable is at least 1.
818+
// This also handles the case when lws.Spec.Replicas is 0.
819+
if stsMaxUnavailableInt < 1 {
820+
stsMaxUnavailableInt = 1
821+
}
822+
stsMaxUnavailable := intstr.FromInt32(stsMaxUnavailableInt)
823+
800824
// construct statefulset apply configuration
801825
statefulSetConfig := appsapplyv1.StatefulSet(lws.Name, lws.Namespace).
802826
WithSpec(appsapplyv1.StatefulSetSpec().
@@ -805,7 +829,7 @@ func constructLeaderStatefulSetApplyConfiguration(lws *leaderworkerset.LeaderWor
805829
WithPodManagementPolicy(appsv1.ParallelPodManagement).
806830
WithTemplate(&podTemplateApplyConfiguration).
807831
WithUpdateStrategy(appsapplyv1.StatefulSetUpdateStrategy().WithType(appsv1.StatefulSetUpdateStrategyType(lws.Spec.RolloutStrategy.Type)).WithRollingUpdate(
808-
appsapplyv1.RollingUpdateStatefulSetStrategy().WithMaxUnavailable(lws.Spec.RolloutStrategy.RollingUpdateConfiguration.MaxUnavailable).WithPartition(partition),
832+
appsapplyv1.RollingUpdateStatefulSetStrategy().WithMaxUnavailable(stsMaxUnavailable).WithPartition(partition),
809833
)).
810834
WithSelector(metaapplyv1.LabelSelector().
811835
WithMatchLabels(map[string]string{

pkg/controllers/leaderworkerset_controller_test.go

Lines changed: 136 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -288,6 +288,73 @@ func TestLeaderStatefulSetApplyConfig(t *testing.T) {
288288
WorkerTemplateSpec(wrappers.MakeWorkerPodSpec()).
289289
Size(1).
290290
RestartPolicy(leaderworkerset.RecreateGroupOnPodRestart).Obj(),
291+
wantApplyConfig: &appsapplyv1.StatefulSetApplyConfiguration{
292+
TypeMetaApplyConfiguration: metaapplyv1.TypeMetaApplyConfiguration{
293+
Kind: ptr.To[string]("StatefulSet"),
294+
APIVersion: ptr.To[string]("apps/v1"),
295+
},
296+
ObjectMetaApplyConfiguration: &metaapplyv1.ObjectMetaApplyConfiguration{
297+
Name: ptr.To[string]("test-sample"),
298+
Namespace: ptr.To[string]("default"),
299+
Labels: map[string]string{
300+
"leaderworkerset.sigs.k8s.io/name": "test-sample",
301+
"leaderworkerset.sigs.k8s.io/template-revision-hash": revisionKey2,
302+
},
303+
Annotations: map[string]string{"leaderworkerset.sigs.k8s.io/replicas": "1"},
304+
},
305+
Spec: &appsapplyv1.StatefulSetSpecApplyConfiguration{
306+
Replicas: ptr.To[int32](1),
307+
Selector: &metaapplyv1.LabelSelectorApplyConfiguration{
308+
MatchLabels: map[string]string{
309+
"leaderworkerset.sigs.k8s.io/name": "test-sample",
310+
"leaderworkerset.sigs.k8s.io/worker-index": "0",
311+
},
312+
},
313+
Template: &coreapplyv1.PodTemplateSpecApplyConfiguration{
314+
ObjectMetaApplyConfiguration: &metaapplyv1.ObjectMetaApplyConfiguration{
315+
Labels: map[string]string{
316+
"leaderworkerset.sigs.k8s.io/name": "test-sample",
317+
"leaderworkerset.sigs.k8s.io/worker-index": "0",
318+
"leaderworkerset.sigs.k8s.io/template-revision-hash": revisionKey2,
319+
},
320+
Annotations: map[string]string{
321+
"leaderworkerset.sigs.k8s.io/size": "1",
322+
},
323+
},
324+
Spec: &coreapplyv1.PodSpecApplyConfiguration{
325+
Containers: []coreapplyv1.ContainerApplyConfiguration{
326+
{
327+
Name: ptr.To[string]("worker"),
328+
Image: ptr.To[string]("docker.io/nginxinc/nginx-unprivileged:1.27"),
329+
Ports: []coreapplyv1.ContainerPortApplyConfiguration{{ContainerPort: ptr.To[int32](8080), Protocol: ptr.To[corev1.Protocol](corev1.ProtocolTCP)}},
330+
Resources: &coreapplyv1.ResourceRequirementsApplyConfiguration{},
331+
},
332+
},
333+
},
334+
},
335+
ServiceName: ptr.To[string]("test-sample"),
336+
PodManagementPolicy: ptr.To[appsv1.PodManagementPolicyType](appsv1.ParallelPodManagement),
337+
UpdateStrategy: appsapplyv1.StatefulSetUpdateStrategy().
338+
WithType(appsv1.RollingUpdateStatefulSetStrategyType).
339+
WithRollingUpdate(appsapplyv1.RollingUpdateStatefulSetStrategy().WithPartition(0).WithMaxUnavailable(intstr.FromInt32(3))),
340+
},
341+
},
342+
},
343+
{
344+
name: "0 maxUnavailable, 2 maxSurge, with empty leader template, exclusive placement disabled",
345+
revisionKey: revisionKey2,
346+
lws: wrappers.BuildBasicLeaderWorkerSet("test-sample", "default").
347+
Replica(1).
348+
RolloutStrategy(leaderworkerset.RolloutStrategy{
349+
Type: leaderworkerset.RollingUpdateStrategyType,
350+
RollingUpdateConfiguration: &leaderworkerset.RollingUpdateConfiguration{
351+
MaxUnavailable: intstr.FromInt32(0),
352+
MaxSurge: intstr.FromInt32(2),
353+
},
354+
}).
355+
WorkerTemplateSpec(wrappers.MakeWorkerPodSpec()).
356+
Size(1).
357+
RestartPolicy(leaderworkerset.RecreateGroupOnPodRestart).Obj(),
291358
wantApplyConfig: &appsapplyv1.StatefulSetApplyConfiguration{
292359
TypeMetaApplyConfiguration: metaapplyv1.TypeMetaApplyConfiguration{
293360
Kind: ptr.To[string]("StatefulSet"),
@@ -526,6 +593,75 @@ func TestLeaderStatefulSetApplyConfig(t *testing.T) {
526593
},
527594
},
528595
},
596+
{
597+
name: "0 replica, 0 maxUnavailable, 0 maxSurge, with empty leader template, exclusive placement disabled",
598+
revisionKey: revisionKey2,
599+
lws: wrappers.BuildBasicLeaderWorkerSet("test-sample", "default").
600+
Replica(0).
601+
RolloutStrategy(leaderworkerset.RolloutStrategy{
602+
Type: leaderworkerset.RollingUpdateStrategyType,
603+
RollingUpdateConfiguration: &leaderworkerset.RollingUpdateConfiguration{
604+
MaxUnavailable: intstr.FromInt32(0),
605+
MaxSurge: intstr.FromInt32(0),
606+
},
607+
}).
608+
WorkerTemplateSpec(wrappers.MakeWorkerPodSpec()).
609+
Size(1).
610+
RestartPolicy(leaderworkerset.RecreateGroupOnPodRestart).Obj(),
611+
wantApplyConfig: &appsapplyv1.StatefulSetApplyConfiguration{
612+
TypeMetaApplyConfiguration: metaapplyv1.TypeMetaApplyConfiguration{
613+
Kind: ptr.To[string]("StatefulSet"),
614+
APIVersion: ptr.To[string]("apps/v1"),
615+
},
616+
ObjectMetaApplyConfiguration: &metaapplyv1.ObjectMetaApplyConfiguration{
617+
Name: ptr.To[string]("test-sample"),
618+
Namespace: ptr.To[string]("default"),
619+
Labels: map[string]string{
620+
"leaderworkerset.sigs.k8s.io/name": "test-sample",
621+
"leaderworkerset.sigs.k8s.io/template-revision-hash": revisionKey2,
622+
},
623+
Annotations: map[string]string{"leaderworkerset.sigs.k8s.io/replicas": "0"},
624+
},
625+
Spec: &appsapplyv1.StatefulSetSpecApplyConfiguration{
626+
Replicas: ptr.To[int32](0),
627+
Selector: &metaapplyv1.LabelSelectorApplyConfiguration{
628+
MatchLabels: map[string]string{
629+
"leaderworkerset.sigs.k8s.io/name": "test-sample",
630+
"leaderworkerset.sigs.k8s.io/worker-index": "0",
631+
},
632+
},
633+
Template: &coreapplyv1.PodTemplateSpecApplyConfiguration{
634+
ObjectMetaApplyConfiguration: &metaapplyv1.ObjectMetaApplyConfiguration{
635+
Labels: map[string]string{
636+
"leaderworkerset.sigs.k8s.io/name": "test-sample",
637+
"leaderworkerset.sigs.k8s.io/worker-index": "0",
638+
"leaderworkerset.sigs.k8s.io/template-revision-hash": revisionKey2,
639+
},
640+
Annotations: map[string]string{
641+
"leaderworkerset.sigs.k8s.io/size": "1",
642+
},
643+
},
644+
Spec: &coreapplyv1.PodSpecApplyConfiguration{
645+
Containers: []coreapplyv1.ContainerApplyConfiguration{
646+
{
647+
Name: ptr.To[string]("worker"),
648+
Image: ptr.To[string]("docker.io/nginxinc/nginx-unprivileged:1.27"),
649+
Ports: []coreapplyv1.ContainerPortApplyConfiguration{{ContainerPort: ptr.To[int32](8080), Protocol: ptr.To[corev1.Protocol](corev1.ProtocolTCP)}},
650+
Resources: &coreapplyv1.ResourceRequirementsApplyConfiguration{},
651+
},
652+
},
653+
},
654+
},
655+
ServiceName: ptr.To[string]("test-sample"),
656+
PodManagementPolicy: ptr.To[appsv1.PodManagementPolicyType](appsv1.ParallelPodManagement),
657+
UpdateStrategy: appsapplyv1.StatefulSetUpdateStrategy().
658+
WithType(appsv1.RollingUpdateStatefulSetStrategyType).
659+
// Sts maxUnavailable is forced to be at least 1,
660+
// even if lws maxUnavailable=0 and lws maxSurge=0.
661+
WithRollingUpdate(appsapplyv1.RollingUpdateStatefulSetStrategy().WithPartition(0).WithMaxUnavailable(intstr.FromInt32(1))),
662+
},
663+
},
664+
},
529665
}
530666

531667
for _, tc := range tests {

pkg/controllers/pod_controller.go

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -190,7 +190,9 @@ func (r *PodReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.R
190190
return ctrl.Result{}, err
191191
}
192192
if err = r.Create(ctx, workerStatefulSet); err != nil {
193-
r.Record.Eventf(&leaderWorkerSet, &pod, corev1.EventTypeWarning, FailedCreate, Create, fmt.Sprintf("Failed to create worker statefulset for leader pod %s", pod.Name))
193+
if client.IgnoreAlreadyExists(err) != nil {
194+
r.Record.Eventf(&leaderWorkerSet, &pod, corev1.EventTypeWarning, FailedCreate, Create, fmt.Sprintf("Failed to create worker statefulset for leader pod %s", pod.Name))
195+
}
194196
return ctrl.Result{}, client.IgnoreAlreadyExists(err)
195197
}
196198
r.Record.Eventf(&leaderWorkerSet, &pod, corev1.EventTypeNormal, GroupsProgressing, Create, fmt.Sprintf("Created worker statefulset for leader pod %s", pod.Name))

site/content/en/docs/concepts/rollout-strategy/_index.md

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -7,8 +7,10 @@ description: >
77

88
Rolling update is vital to online services with zero downtime. For LLM inference services this is particularly important, as it helps to mitigate accelerator stockout. Two different configurations are supported in LWS, `maxUnavailable` and `maxSurge`:
99

10-
- `MaxUnavailable`: Indicates how many replicas are allowed to be unavailable during the update, the unavailable number is based on the spec.replicas. Defaults to 1. Note that only values >= 1 are supported.
11-
- `MaxSurge`: Indicates how many extra replicas can be deployed during the update. Defaults to 0.
10+
- `maxUnavailable`: Indicates how many replicas are allowed to be unavailable during the update, the unavailable number is based on the spec.replicas. Defaults to 1.
11+
- `maxSurge`: Indicates how many extra replicas can be deployed during the update. Defaults to 0.
12+
13+
Note that `maxSurge` and `maxUnavailable` cannot both be zero at the same time.
1214

1315
Here's a leaderWorkerSet configured with rollout strategy, you can find the example [here](https://github.com/kubernetes-sigs/lws/blob/main/docs/examples/sample/lws-rollout-strategy.yaml):
1416

test/integration/controllers/leaderworkerset_test.go

Lines changed: 121 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -655,7 +655,7 @@ var _ = ginkgo.Describe("LeaderWorkerSet controller", func() {
655655
testing.ExpectLeaderWorkerSetUpgradeInProgress(ctx, k8sClient, lws, "Rolling Upgrade is in progress")
656656
// This should be 4 at the first step, however, reconciliation syncs quickly and
657657
// soon updated to 3 (replicas-maxUnavailable), it's fine here.
658-
testing.ValidateEvent(ctx, k8sClient, controllers.GroupsUpdating, corev1.EventTypeNormal, "Updating replicas 4 to 3", lws.Namespace)
658+
testing.ValidateEvent(ctx, k8sClient, controllers.GroupsUpdating, corev1.EventTypeNormal, "Updating replica 3", lws.Namespace)
659659
testing.ExpectStatefulsetPartitionEqualTo(ctx, k8sClient, lws, 3)
660660
testing.ExpectLeaderWorkerSetStatusReplicas(ctx, k8sClient, lws, 4, 0)
661661
},
@@ -670,7 +670,7 @@ var _ = ginkgo.Describe("LeaderWorkerSet controller", func() {
670670
testing.ExpectLeaderWorkerSetUnavailable(ctx, k8sClient, lws, "All replicas are ready")
671671
testing.ExpectLeaderWorkerSetProgressing(ctx, k8sClient, lws, "Replicas are progressing")
672672
testing.ExpectLeaderWorkerSetUpgradeInProgress(ctx, k8sClient, lws, "Rolling Upgrade is in progress")
673-
testing.ValidateEvent(ctx, k8sClient, controllers.GroupsUpdating, corev1.EventTypeNormal, "Updating replicas 3 to 2", lws.Namespace)
673+
testing.ValidateEvent(ctx, k8sClient, controllers.GroupsUpdating, corev1.EventTypeNormal, "Updating replica 2", lws.Namespace)
674674
testing.ExpectStatefulsetPartitionEqualTo(ctx, k8sClient, lws, 2)
675675
testing.ExpectLeaderWorkerSetStatusReplicas(ctx, k8sClient, lws, 4, 1)
676676
},
@@ -1082,6 +1082,125 @@ var _ = ginkgo.Describe("LeaderWorkerSet controller", func() {
10821082
},
10831083
},
10841084
}),
1085+
ginkgo.Entry("rolling update with maxUnavailable zero and maxSurge set", &testCase{
1086+
makeLeaderWorkerSet: func(nsName string) *wrappers.LeaderWorkerSetWrapper {
1087+
return wrappers.BuildLeaderWorkerSet(nsName).Replica(4).MaxUnavailable(0).MaxSurge(1)
1088+
},
1089+
updates: []*update{
1090+
{
1091+
// Set lws to available condition.
1092+
lwsUpdateFn: func(lws *leaderworkerset.LeaderWorkerSet) {
1093+
testing.SetSuperPodToReady(ctx, k8sClient, lws, 4)
1094+
},
1095+
checkLWSState: func(lws *leaderworkerset.LeaderWorkerSet) {
1096+
testing.ExpectLeaderWorkerSetAvailable(ctx, k8sClient, lws, "All replicas are ready")
1097+
testing.ExpectStatefulsetPartitionEqualTo(ctx, k8sClient, lws, 0)
1098+
testing.ExpectValidLeaderStatefulSet(ctx, k8sClient, lws, 4)
1099+
testing.ExpectValidWorkerStatefulSets(ctx, lws, k8sClient, true)
1100+
testing.ExpectLeaderWorkerSetStatusReplicas(ctx, k8sClient, lws, 4, 4)
1101+
},
1102+
},
1103+
{
1104+
// Update the worker template.
1105+
lwsUpdateFn: func(lws *leaderworkerset.LeaderWorkerSet) {
1106+
gomega.Eventually(func() error {
1107+
var leaderworkerset leaderworkerset.LeaderWorkerSet
1108+
if err := k8sClient.Get(ctx, types.NamespacedName{Name: lws.Name, Namespace: lws.Namespace}, &leaderworkerset); err != nil {
1109+
return err
1110+
}
1111+
leaderworkerset.Spec.LeaderWorkerTemplate.WorkerTemplate.Spec.Containers[0].Name = "new-worker"
1112+
return k8sClient.Update(ctx, &leaderworkerset)
1113+
}, testing.Timeout, testing.Interval).Should(gomega.Succeed())
1114+
1115+
var leaderSts appsv1.StatefulSet
1116+
testing.GetLeaderStatefulset(ctx, lws, k8sClient, &leaderSts)
1117+
// Create leader pod for maxSurge.
1118+
gomega.Expect(testing.CreateLeaderPods(ctx, leaderSts, k8sClient, lws, int(*lws.Spec.Replicas), int(*lws.Spec.Replicas)+1)).To(gomega.Succeed())
1119+
},
1120+
checkLWSState: func(lws *leaderworkerset.LeaderWorkerSet) {
1121+
testing.ExpectValidLeaderStatefulSet(ctx, k8sClient, lws, 5)
1122+
testing.ExpectLeaderWorkerSetUnavailable(ctx, k8sClient, lws, "All replicas are ready")
1123+
testing.ExpectLeaderWorkerSetProgressing(ctx, k8sClient, lws, "Replicas are progressing")
1124+
testing.ExpectLeaderWorkerSetUpgradeInProgress(ctx, k8sClient, lws, "Rolling Upgrade is in progress")
1125+
testing.ExpectStatefulsetPartitionEqualTo(ctx, k8sClient, lws, 4)
1126+
testing.ExpectLeaderWorkerSetStatusReplicas(ctx, k8sClient, lws, 4, 0)
1127+
},
1128+
},
1129+
{
1130+
// Rolling update index-4 replica.
1131+
lwsUpdateFn: func(lws *leaderworkerset.LeaderWorkerSet) {
1132+
testing.SetPodGroupToReady(ctx, k8sClient, lws.Name+"-4", lws)
1133+
},
1134+
checkLWSState: func(lws *leaderworkerset.LeaderWorkerSet) {
1135+
testing.ExpectValidLeaderStatefulSet(ctx, k8sClient, lws, 5)
1136+
testing.ExpectLeaderWorkerSetUnavailable(ctx, k8sClient, lws, "All replicas are ready")
1137+
testing.ExpectLeaderWorkerSetProgressing(ctx, k8sClient, lws, "Replicas are progressing")
1138+
testing.ExpectLeaderWorkerSetUpgradeInProgress(ctx, k8sClient, lws, "Rolling Upgrade is in progress")
1139+
testing.ExpectStatefulsetPartitionEqualTo(ctx, k8sClient, lws, 3)
1140+
testing.ExpectLeaderWorkerSetStatusReplicas(ctx, k8sClient, lws, 5, 1)
1141+
},
1142+
},
1143+
{
1144+
// Rolling update index-3 replica.
1145+
lwsUpdateFn: func(lws *leaderworkerset.LeaderWorkerSet) {
1146+
testing.SetPodGroupToReady(ctx, k8sClient, lws.Name+"-3", lws)
1147+
},
1148+
checkLWSState: func(lws *leaderworkerset.LeaderWorkerSet) {
1149+
testing.ExpectValidLeaderStatefulSet(ctx, k8sClient, lws, 5)
1150+
testing.ExpectLeaderWorkerSetUnavailable(ctx, k8sClient, lws, "All replicas are ready")
1151+
testing.ExpectLeaderWorkerSetProgressing(ctx, k8sClient, lws, "Replicas are progressing")
1152+
testing.ExpectLeaderWorkerSetUpgradeInProgress(ctx, k8sClient, lws, "Rolling Upgrade is in progress")
1153+
testing.ExpectStatefulsetPartitionEqualTo(ctx, k8sClient, lws, 2)
1154+
testing.ExpectLeaderWorkerSetStatusReplicas(ctx, k8sClient, lws, 5, 2)
1155+
},
1156+
},
1157+
{
1158+
// Rolling update index-2 replica.
1159+
lwsUpdateFn: func(lws *leaderworkerset.LeaderWorkerSet) {
1160+
testing.SetPodGroupToReady(ctx, k8sClient, lws.Name+"-2", lws)
1161+
},
1162+
checkLWSState: func(lws *leaderworkerset.LeaderWorkerSet) {
1163+
testing.ExpectValidLeaderStatefulSet(ctx, k8sClient, lws, 5)
1164+
testing.ExpectLeaderWorkerSetUnavailable(ctx, k8sClient, lws, "All replicas are ready")
1165+
testing.ExpectLeaderWorkerSetProgressing(ctx, k8sClient, lws, "Replicas are progressing")
1166+
testing.ExpectLeaderWorkerSetUpgradeInProgress(ctx, k8sClient, lws, "Rolling Upgrade is in progress")
1167+
testing.ExpectStatefulsetPartitionEqualTo(ctx, k8sClient, lws, 1)
1168+
testing.ExpectLeaderWorkerSetStatusReplicas(ctx, k8sClient, lws, 5, 3)
1169+
},
1170+
},
1171+
{
1172+
// Rolling update index-1 replica.
1173+
lwsUpdateFn: func(lws *leaderworkerset.LeaderWorkerSet) {
1174+
testing.SetPodGroupToReady(ctx, k8sClient, lws.Name+"-1", lws)
1175+
},
1176+
checkLWSState: func(lws *leaderworkerset.LeaderWorkerSet) {
1177+
testing.ExpectValidLeaderStatefulSet(ctx, k8sClient, lws, 5)
1178+
testing.ExpectLeaderWorkerSetUnavailable(ctx, k8sClient, lws, "All replicas are ready")
1179+
testing.ExpectLeaderWorkerSetProgressing(ctx, k8sClient, lws, "Replicas are progressing")
1180+
testing.ExpectLeaderWorkerSetUpgradeInProgress(ctx, k8sClient, lws, "Rolling Upgrade is in progress")
1181+
testing.ExpectStatefulsetPartitionEqualTo(ctx, k8sClient, lws, 0)
1182+
testing.ExpectLeaderWorkerSetStatusReplicas(ctx, k8sClient, lws, 5, 4)
1183+
},
1184+
},
1185+
{
1186+
// Rolling update index-0 replica.
1187+
lwsUpdateFn: func(lws *leaderworkerset.LeaderWorkerSet) {
1188+
testing.SetPodGroupToReady(ctx, k8sClient, lws.Name+"-0", lws)
1189+
// Reclaim the replica.
1190+
testing.DeleteLeaderPod(ctx, k8sClient, lws, 4, 5)
1191+
},
1192+
checkLWSState: func(lws *leaderworkerset.LeaderWorkerSet) {
1193+
testing.ExpectValidLeaderStatefulSet(ctx, k8sClient, lws, 4)
1194+
testing.ExpectValidWorkerStatefulSets(ctx, lws, k8sClient, true)
1195+
testing.ExpectLeaderWorkerSetAvailable(ctx, k8sClient, lws, "All replicas are ready")
1196+
testing.ExpectLeaderWorkerSetNotProgressing(ctx, k8sClient, lws, "Replicas are progressing")
1197+
testing.ExpectLeaderWorkerSetNoUpgradeInProgress(ctx, k8sClient, lws, "Rolling Upgrade is in progress")
1198+
testing.ExpectStatefulsetPartitionEqualTo(ctx, k8sClient, lws, 0)
1199+
testing.ExpectLeaderWorkerSetStatusReplicas(ctx, k8sClient, lws, 4, 4)
1200+
},
1201+
},
1202+
},
1203+
}),
10851204
ginkgo.Entry("rolling update with maxSurge set", &testCase{
10861205
makeLeaderWorkerSet: func(nsName string) *wrappers.LeaderWorkerSetWrapper {
10871206
return wrappers.BuildLeaderWorkerSet(nsName).Replica(4).MaxSurge(1)

0 commit comments

Comments
 (0)