Skip to content

Commit 5f8dfdc

Browse files
author
draveness
committed
fix(daemon): create more expectations when skipping pods
1 parent c54978a commit 5f8dfdc

File tree

2 files changed

+97
-10
lines changed

2 files changed

+97
-10
lines changed

pkg/controller/daemon/daemon_controller.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1071,7 +1071,7 @@ func (dsc *DaemonSetsController) syncNodes(ds *apps.DaemonSet, podsToDelete, nod
10711071
}
10721072
createWait.Wait()
10731073
// any skipped pods that we never attempted to start shouldn't be expected.
1074-
skippedPods := createDiff - batchSize
1074+
skippedPods := createDiff - (batchSize + pos)
10751075
if errorCount < len(errCh) && skippedPods > 0 {
10761076
klog.V(2).Infof("Slow-start failure. Skipping creation of %d pods, decrementing expectations for set %q/%q", skippedPods, ds.Namespace, ds.Name)
10771077
for i := 0; i < skippedPods; i++ {

pkg/controller/daemon/daemon_controller_test.go

Lines changed: 96 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -228,15 +228,18 @@ func addFailedPods(podStore cache.Store, nodeName string, label map[string]strin
228228
type fakePodControl struct {
229229
sync.Mutex
230230
*controller.FakePodControl
231-
podStore cache.Store
232-
podIDMap map[string]*v1.Pod
231+
podStore cache.Store
232+
podIDMap map[string]*v1.Pod
233+
expectations controller.ControllerExpectationsInterface
234+
dsc *daemonSetsController
233235
}
234236

235237
func newFakePodControl() *fakePodControl {
236238
podIDMap := make(map[string]*v1.Pod)
237239
return &fakePodControl{
238240
FakePodControl: &controller.FakePodControl{},
239-
podIDMap: podIDMap}
241+
podIDMap: podIDMap,
242+
}
240243
}
241244

242245
func (f *fakePodControl) CreatePodsOnNode(nodeName, namespace string, template *v1.PodTemplateSpec, object runtime.Object, controllerRef *metav1.OwnerReference) error {
@@ -264,6 +267,11 @@ func (f *fakePodControl) CreatePodsOnNode(nodeName, namespace string, template *
264267

265268
f.podStore.Update(pod)
266269
f.podIDMap[pod.Name] = pod
270+
271+
ds := object.(*apps.DaemonSet)
272+
dsKey, _ := controller.KeyFunc(ds)
273+
f.expectations.CreationObserved(dsKey)
274+
267275
return nil
268276
}
269277

@@ -289,6 +297,11 @@ func (f *fakePodControl) CreatePodsWithControllerRef(namespace string, template
289297

290298
f.podStore.Update(pod)
291299
f.podIDMap[pod.Name] = pod
300+
301+
ds := object.(*apps.DaemonSet)
302+
dsKey, _ := controller.KeyFunc(ds)
303+
f.expectations.CreationObserved(dsKey)
304+
292305
return nil
293306
}
294307

@@ -304,6 +317,11 @@ func (f *fakePodControl) DeletePod(namespace string, podID string, object runtim
304317
}
305318
f.podStore.Delete(pod)
306319
delete(f.podIDMap, podID)
320+
321+
ds := object.(*apps.DaemonSet)
322+
dsKey, _ := controller.KeyFunc(ds)
323+
f.expectations.DeletionObserved(dsKey)
324+
307325
return nil
308326
}
309327

@@ -344,14 +362,18 @@ func newTestController(initialObjects ...runtime.Object) (*daemonSetsController,
344362
dsc.podControl = podControl
345363
podControl.podStore = informerFactory.Core().V1().Pods().Informer().GetStore()
346364

347-
return &daemonSetsController{
365+
newDsc := &daemonSetsController{
348366
dsc,
349367
informerFactory.Apps().V1().DaemonSets().Informer().GetStore(),
350368
informerFactory.Apps().V1().ControllerRevisions().Informer().GetStore(),
351369
informerFactory.Core().V1().Pods().Informer().GetStore(),
352370
informerFactory.Core().V1().Nodes().Informer().GetStore(),
353371
fakeRecorder,
354-
}, podControl, clientset, nil
372+
}
373+
374+
podControl.expectations = newDsc.expectations
375+
376+
return newDsc, podControl, clientset, nil
355377
}
356378

357379
func resetCounters(manager *daemonSetsController) {
@@ -566,6 +588,34 @@ func TestSimpleDaemonSetPodCreateErrors(t *testing.T) {
566588
}
567589
}
568590

591+
func TestDaemonSetPodCreateExpectationsError(t *testing.T) {
592+
for _, f := range []bool{true, false} {
593+
defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
594+
strategies := updateStrategies()
595+
for _, strategy := range strategies {
596+
ds := newDaemonSet("foo")
597+
ds.Spec.UpdateStrategy = *strategy
598+
manager, podControl, _, err := newTestController(ds)
599+
if err != nil {
600+
t.Fatalf("error creating DaemonSets controller: %v", err)
601+
}
602+
podControl.FakePodControl.CreateLimit = 10
603+
creationExpectations := 100
604+
addNodes(manager.nodeStore, 0, 100, nil)
605+
manager.dsStore.Add(ds)
606+
syncAndValidateDaemonSets(t, manager, ds, podControl, podControl.FakePodControl.CreateLimit, 0, 0)
607+
dsKey, err := controller.KeyFunc(ds)
608+
if err != nil {
609+
t.Fatalf("error get DaemonSets controller key: %v", err)
610+
}
611+
612+
if !manager.expectations.SatisfiedExpectations(dsKey) {
613+
t.Errorf("Unsatisfied pod creation expectations. Expected %d", creationExpectations)
614+
}
615+
}
616+
}
617+
}
618+
569619
func TestSimpleDaemonSetUpdatesStatusAfterLaunchingPods(t *testing.T) {
570620
for _, f := range []bool{true, false} {
571621
defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
@@ -796,7 +846,8 @@ func TestInsufficientCapacityNodeSufficientCapacityWithNodeLabelDaemonLaunchPod(
796846
// are sufficient resources.
797847
func TestSufficientCapacityWithTerminatedPodsDaemonLaunchesPod(t *testing.T) {
798848
defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, false)()
799-
for _, strategy := range updateStrategies() {
849+
850+
validate := func(strategy *apps.DaemonSetUpdateStrategy, expectedEvents int) {
800851
podSpec := resourcePodSpec("too-much-mem", "75M", "75m")
801852
ds := newDaemonSet("foo")
802853
ds.Spec.UpdateStrategy = *strategy
@@ -813,15 +864,33 @@ func TestSufficientCapacityWithTerminatedPodsDaemonLaunchesPod(t *testing.T) {
813864
Status: v1.PodStatus{Phase: v1.PodSucceeded},
814865
})
815866
manager.dsStore.Add(ds)
816-
syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 1)
867+
syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, expectedEvents)
868+
}
869+
870+
tests := []struct {
871+
strategy *apps.DaemonSetUpdateStrategy
872+
expectedEvents int
873+
}{
874+
{
875+
strategy: newOnDeleteStrategy(),
876+
expectedEvents: 1,
877+
},
878+
{
879+
strategy: newRollbackStrategy(),
880+
expectedEvents: 2,
881+
},
882+
}
883+
884+
for _, t := range tests {
885+
validate(t.strategy, t.expectedEvents)
817886
}
818887
}
819888

820889
// When ScheduleDaemonSetPods is disabled, DaemonSets should place onto nodes with sufficient free resources.
821890
func TestSufficientCapacityNodeDaemonLaunchesPod(t *testing.T) {
822891
defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, false)()
823892

824-
for _, strategy := range updateStrategies() {
893+
validate := func(strategy *apps.DaemonSetUpdateStrategy, expectedEvents int) {
825894
podSpec := resourcePodSpec("not-too-much-mem", "75M", "75m")
826895
ds := newDaemonSet("foo")
827896
ds.Spec.UpdateStrategy = *strategy
@@ -837,7 +906,25 @@ func TestSufficientCapacityNodeDaemonLaunchesPod(t *testing.T) {
837906
Spec: podSpec,
838907
})
839908
manager.dsStore.Add(ds)
840-
syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 1)
909+
syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, expectedEvents)
910+
}
911+
912+
tests := []struct {
913+
strategy *apps.DaemonSetUpdateStrategy
914+
expectedEvents int
915+
}{
916+
{
917+
strategy: newOnDeleteStrategy(),
918+
expectedEvents: 1,
919+
},
920+
{
921+
strategy: newRollbackStrategy(),
922+
expectedEvents: 2,
923+
},
924+
}
925+
926+
for _, t := range tests {
927+
validate(t.strategy, t.expectedEvents)
841928
}
842929
}
843930

0 commit comments

Comments
 (0)