@@ -655,7 +655,7 @@ var _ = ginkgo.Describe("LeaderWorkerSet controller", func() {
655655 testing .ExpectLeaderWorkerSetUpgradeInProgress (ctx , k8sClient , lws , "Rolling Upgrade is in progress" )
656656 // This should be 4 at the first step, however, reconciliation syncs quickly and
657657 // soon updated to 3 (replicas-maxUnavailable), it's fine here.
658- testing .ValidateEvent (ctx , k8sClient , controllers .GroupsUpdating , corev1 .EventTypeNormal , "Updating replicas 4 to 3" , lws .Namespace )
658+ testing .ValidateEvent (ctx , k8sClient , controllers .GroupsUpdating , corev1 .EventTypeNormal , "Updating replica 3" , lws .Namespace )
659659 testing .ExpectStatefulsetPartitionEqualTo (ctx , k8sClient , lws , 3 )
660660 testing .ExpectLeaderWorkerSetStatusReplicas (ctx , k8sClient , lws , 4 , 0 )
661661 },
@@ -670,7 +670,7 @@ var _ = ginkgo.Describe("LeaderWorkerSet controller", func() {
670670 testing .ExpectLeaderWorkerSetUnavailable (ctx , k8sClient , lws , "All replicas are ready" )
671671 testing .ExpectLeaderWorkerSetProgressing (ctx , k8sClient , lws , "Replicas are progressing" )
672672 testing .ExpectLeaderWorkerSetUpgradeInProgress (ctx , k8sClient , lws , "Rolling Upgrade is in progress" )
673- testing .ValidateEvent (ctx , k8sClient , controllers .GroupsUpdating , corev1 .EventTypeNormal , "Updating replicas 3 to 2" , lws .Namespace )
673+ testing .ValidateEvent (ctx , k8sClient , controllers .GroupsUpdating , corev1 .EventTypeNormal , "Updating replica 2" , lws .Namespace )
674674 testing .ExpectStatefulsetPartitionEqualTo (ctx , k8sClient , lws , 2 )
675675 testing .ExpectLeaderWorkerSetStatusReplicas (ctx , k8sClient , lws , 4 , 1 )
676676 },
@@ -1082,6 +1082,125 @@ var _ = ginkgo.Describe("LeaderWorkerSet controller", func() {
10821082 },
10831083 },
10841084 }),
1085+ ginkgo .Entry ("rolling update with maxUnavailable zero and maxSurge set" , & testCase {
1086+ makeLeaderWorkerSet : func (nsName string ) * wrappers.LeaderWorkerSetWrapper {
1087+ return wrappers .BuildLeaderWorkerSet (nsName ).Replica (4 ).MaxUnavailable (0 ).MaxSurge (1 )
1088+ },
1089+ updates : []* update {
1090+ {
1091+ // Set lws to available condition.
1092+ lwsUpdateFn : func (lws * leaderworkerset.LeaderWorkerSet ) {
1093+ testing .SetSuperPodToReady (ctx , k8sClient , lws , 4 )
1094+ },
1095+ checkLWSState : func (lws * leaderworkerset.LeaderWorkerSet ) {
1096+ testing .ExpectLeaderWorkerSetAvailable (ctx , k8sClient , lws , "All replicas are ready" )
1097+ testing .ExpectStatefulsetPartitionEqualTo (ctx , k8sClient , lws , 0 )
1098+ testing .ExpectValidLeaderStatefulSet (ctx , k8sClient , lws , 4 )
1099+ testing .ExpectValidWorkerStatefulSets (ctx , lws , k8sClient , true )
1100+ testing .ExpectLeaderWorkerSetStatusReplicas (ctx , k8sClient , lws , 4 , 4 )
1101+ },
1102+ },
1103+ {
1104+ // Update the worker template.
1105+ lwsUpdateFn : func (lws * leaderworkerset.LeaderWorkerSet ) {
1106+ gomega .Eventually (func () error {
1107+ var leaderworkerset leaderworkerset.LeaderWorkerSet
1108+ if err := k8sClient .Get (ctx , types.NamespacedName {Name : lws .Name , Namespace : lws .Namespace }, & leaderworkerset ); err != nil {
1109+ return err
1110+ }
1111+ leaderworkerset .Spec .LeaderWorkerTemplate .WorkerTemplate .Spec .Containers [0 ].Name = "new-worker"
1112+ return k8sClient .Update (ctx , & leaderworkerset )
1113+ }, testing .Timeout , testing .Interval ).Should (gomega .Succeed ())
1114+
1115+ var leaderSts appsv1.StatefulSet
1116+ testing .GetLeaderStatefulset (ctx , lws , k8sClient , & leaderSts )
1117+ // Create leader pod for maxSurge.
1118+ gomega .Expect (testing .CreateLeaderPods (ctx , leaderSts , k8sClient , lws , int (* lws .Spec .Replicas ), int (* lws .Spec .Replicas )+ 1 )).To (gomega .Succeed ())
1119+ },
1120+ checkLWSState : func (lws * leaderworkerset.LeaderWorkerSet ) {
1121+ testing .ExpectValidLeaderStatefulSet (ctx , k8sClient , lws , 5 )
1122+ testing .ExpectLeaderWorkerSetUnavailable (ctx , k8sClient , lws , "All replicas are ready" )
1123+ testing .ExpectLeaderWorkerSetProgressing (ctx , k8sClient , lws , "Replicas are progressing" )
1124+ testing .ExpectLeaderWorkerSetUpgradeInProgress (ctx , k8sClient , lws , "Rolling Upgrade is in progress" )
1125+ testing .ExpectStatefulsetPartitionEqualTo (ctx , k8sClient , lws , 4 )
1126+ testing .ExpectLeaderWorkerSetStatusReplicas (ctx , k8sClient , lws , 4 , 0 )
1127+ },
1128+ },
1129+ {
1130+ // Rolling update index-4 replica.
1131+ lwsUpdateFn : func (lws * leaderworkerset.LeaderWorkerSet ) {
1132+ testing .SetPodGroupToReady (ctx , k8sClient , lws .Name + "-4" , lws )
1133+ },
1134+ checkLWSState : func (lws * leaderworkerset.LeaderWorkerSet ) {
1135+ testing .ExpectValidLeaderStatefulSet (ctx , k8sClient , lws , 5 )
1136+ testing .ExpectLeaderWorkerSetUnavailable (ctx , k8sClient , lws , "All replicas are ready" )
1137+ testing .ExpectLeaderWorkerSetProgressing (ctx , k8sClient , lws , "Replicas are progressing" )
1138+ testing .ExpectLeaderWorkerSetUpgradeInProgress (ctx , k8sClient , lws , "Rolling Upgrade is in progress" )
1139+ testing .ExpectStatefulsetPartitionEqualTo (ctx , k8sClient , lws , 3 )
1140+ testing .ExpectLeaderWorkerSetStatusReplicas (ctx , k8sClient , lws , 5 , 1 )
1141+ },
1142+ },
1143+ {
1144+ // Rolling update index-3 replica.
1145+ lwsUpdateFn : func (lws * leaderworkerset.LeaderWorkerSet ) {
1146+ testing .SetPodGroupToReady (ctx , k8sClient , lws .Name + "-3" , lws )
1147+ },
1148+ checkLWSState : func (lws * leaderworkerset.LeaderWorkerSet ) {
1149+ testing .ExpectValidLeaderStatefulSet (ctx , k8sClient , lws , 5 )
1150+ testing .ExpectLeaderWorkerSetUnavailable (ctx , k8sClient , lws , "All replicas are ready" )
1151+ testing .ExpectLeaderWorkerSetProgressing (ctx , k8sClient , lws , "Replicas are progressing" )
1152+ testing .ExpectLeaderWorkerSetUpgradeInProgress (ctx , k8sClient , lws , "Rolling Upgrade is in progress" )
1153+ testing .ExpectStatefulsetPartitionEqualTo (ctx , k8sClient , lws , 2 )
1154+ testing .ExpectLeaderWorkerSetStatusReplicas (ctx , k8sClient , lws , 5 , 2 )
1155+ },
1156+ },
1157+ {
1158+ // Rolling update index-2 replica.
1159+ lwsUpdateFn : func (lws * leaderworkerset.LeaderWorkerSet ) {
1160+ testing .SetPodGroupToReady (ctx , k8sClient , lws .Name + "-2" , lws )
1161+ },
1162+ checkLWSState : func (lws * leaderworkerset.LeaderWorkerSet ) {
1163+ testing .ExpectValidLeaderStatefulSet (ctx , k8sClient , lws , 5 )
1164+ testing .ExpectLeaderWorkerSetUnavailable (ctx , k8sClient , lws , "All replicas are ready" )
1165+ testing .ExpectLeaderWorkerSetProgressing (ctx , k8sClient , lws , "Replicas are progressing" )
1166+ testing .ExpectLeaderWorkerSetUpgradeInProgress (ctx , k8sClient , lws , "Rolling Upgrade is in progress" )
1167+ testing .ExpectStatefulsetPartitionEqualTo (ctx , k8sClient , lws , 1 )
1168+ testing .ExpectLeaderWorkerSetStatusReplicas (ctx , k8sClient , lws , 5 , 3 )
1169+ },
1170+ },
1171+ {
1172+ // Rolling update index-1 replica.
1173+ lwsUpdateFn : func (lws * leaderworkerset.LeaderWorkerSet ) {
1174+ testing .SetPodGroupToReady (ctx , k8sClient , lws .Name + "-1" , lws )
1175+ },
1176+ checkLWSState : func (lws * leaderworkerset.LeaderWorkerSet ) {
1177+ testing .ExpectValidLeaderStatefulSet (ctx , k8sClient , lws , 5 )
1178+ testing .ExpectLeaderWorkerSetUnavailable (ctx , k8sClient , lws , "All replicas are ready" )
1179+ testing .ExpectLeaderWorkerSetProgressing (ctx , k8sClient , lws , "Replicas are progressing" )
1180+ testing .ExpectLeaderWorkerSetUpgradeInProgress (ctx , k8sClient , lws , "Rolling Upgrade is in progress" )
1181+ testing .ExpectStatefulsetPartitionEqualTo (ctx , k8sClient , lws , 0 )
1182+ testing .ExpectLeaderWorkerSetStatusReplicas (ctx , k8sClient , lws , 5 , 4 )
1183+ },
1184+ },
1185+ {
1186+ // Rolling update index-0 replica.
1187+ lwsUpdateFn : func (lws * leaderworkerset.LeaderWorkerSet ) {
1188+ testing .SetPodGroupToReady (ctx , k8sClient , lws .Name + "-0" , lws )
1189+ // Reclaim the replica.
1190+ testing .DeleteLeaderPod (ctx , k8sClient , lws , 4 , 5 )
1191+ },
1192+ checkLWSState : func (lws * leaderworkerset.LeaderWorkerSet ) {
1193+ testing .ExpectValidLeaderStatefulSet (ctx , k8sClient , lws , 4 )
1194+ testing .ExpectValidWorkerStatefulSets (ctx , lws , k8sClient , true )
1195+ testing .ExpectLeaderWorkerSetAvailable (ctx , k8sClient , lws , "All replicas are ready" )
1196+ testing .ExpectLeaderWorkerSetNotProgressing (ctx , k8sClient , lws , "Replicas are progressing" )
1197+ testing .ExpectLeaderWorkerSetNoUpgradeInProgress (ctx , k8sClient , lws , "Rolling Upgrade is in progress" )
1198+ testing .ExpectStatefulsetPartitionEqualTo (ctx , k8sClient , lws , 0 )
1199+ testing .ExpectLeaderWorkerSetStatusReplicas (ctx , k8sClient , lws , 4 , 4 )
1200+ },
1201+ },
1202+ },
1203+ }),
10851204 ginkgo .Entry ("rolling update with maxSurge set" , & testCase {
10861205 makeLeaderWorkerSet : func (nsName string ) * wrappers.LeaderWorkerSetWrapper {
10871206 return wrappers .BuildLeaderWorkerSet (nsName ).Replica (4 ).MaxSurge (1 )