Skip to content

Commit ff5cb37

Browse files
authored
Merge pull request kubernetes#127903 from soltysh/test_daemonset
Add unit tests verifying that the update touches old, unhealthy pods first, and only afterwards touches new pods
2 parents c19ffb7 + 174288d commit ff5cb37

File tree

1 file changed

+214
-0
lines changed

1 file changed

+214
-0
lines changed

pkg/controller/daemon/update_test.go

Lines changed: 214 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,10 @@ package daemon
1818

1919
import (
2020
"context"
21+
"fmt"
22+
"math/rand"
2123
"reflect"
24+
"strings"
2225
"testing"
2326
"time"
2427

@@ -27,6 +30,7 @@ import (
2730
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
2831
"k8s.io/apimachinery/pkg/labels"
2932
"k8s.io/apimachinery/pkg/util/intstr"
33+
"k8s.io/apimachinery/pkg/util/sets"
3034
"k8s.io/klog/v2/ktesting"
3135
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
3236
"k8s.io/kubernetes/pkg/controller/daemon/util"
@@ -294,6 +298,216 @@ func TestDaemonSetUpdatesSomeOldPodsNotReady(t *testing.T) {
294298
t.Fatalf("Expected %d old ready pods, but found %d", expectedReadyCount, readyCount)
295299
}
296300
}
301+
func TestDaemonSetUpdatesSaveOldHealthyPods(t *testing.T) {
302+
_, ctx := ktesting.NewTestContext(t)
303+
ds := newDaemonSet("foo")
304+
manager, podControl, _, err := newTestController(ctx, ds)
305+
if err != nil {
306+
t.Fatalf("error creating DaemonSets controller: %v", err)
307+
}
308+
addNodes(manager.nodeStore, 0, 20, nil)
309+
err = manager.dsStore.Add(ds)
310+
if err != nil {
311+
t.Fatal(err)
312+
}
313+
expectSyncDaemonSets(t, manager, ds, podControl, 20, 0, 0)
314+
markPodsReady(podControl.podStore)
315+
316+
t.Logf("first update to get 10 old pods which should never be touched")
317+
ds.Spec.Template.Spec.Containers[0].Image = "foo2/bar2"
318+
ds.Spec.UpdateStrategy.Type = apps.RollingUpdateDaemonSetStrategyType
319+
maxUnavailable := 10
320+
intStr := intstr.FromInt(maxUnavailable)
321+
ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
322+
err = manager.dsStore.Update(ds)
323+
if err != nil {
324+
t.Fatal(err)
325+
}
326+
327+
clearExpectations(t, manager, ds, podControl)
328+
expectSyncDaemonSets(t, manager, ds, podControl, 0, maxUnavailable, 0)
329+
330+
clearExpectations(t, manager, ds, podControl)
331+
expectSyncDaemonSets(t, manager, ds, podControl, maxUnavailable, 0, 0)
332+
333+
clearExpectations(t, manager, ds, podControl)
334+
expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
335+
clearExpectations(t, manager, ds, podControl)
336+
337+
// save the pods we want to maintain running
338+
oldReadyPods := sets.Set[string]{}
339+
for _, obj := range podControl.podStore.List() {
340+
pod := obj.(*v1.Pod)
341+
if podutil.IsPodReady(pod) {
342+
oldReadyPods.Insert(pod.Name)
343+
}
344+
}
345+
346+
for i := 0; i < 10; i++ {
347+
maxUnavailable := rand.Intn(10)
348+
t.Logf("%d iteration, maxUnavailable=%d", i+1, maxUnavailable)
349+
intStr = intstr.FromInt(maxUnavailable)
350+
ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
351+
ds.Spec.Template.Spec.Containers[0].Image = fmt.Sprintf("foo2/bar3-%d", i)
352+
err = manager.dsStore.Update(ds)
353+
if err != nil {
354+
t.Fatal(err)
355+
}
356+
357+
// only the 10 unavailable pods will be allowed to be updated
358+
clearExpectations(t, manager, ds, podControl)
359+
expectSyncDaemonSets(t, manager, ds, podControl, 0, 10, 0)
360+
361+
clearExpectations(t, manager, ds, podControl)
362+
expectSyncDaemonSets(t, manager, ds, podControl, 10, 0, 0)
363+
364+
clearExpectations(t, manager, ds, podControl)
365+
expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
366+
clearExpectations(t, manager, ds, podControl)
367+
368+
// verify that the ready pods are never touched
369+
readyPods := sets.Set[string]{}
370+
t.Logf("looking for old ready pods: %s", strings.Join(oldReadyPods.UnsortedList(), ", "))
371+
for _, obj := range podControl.podStore.List() {
372+
pod := obj.(*v1.Pod)
373+
if podutil.IsPodReady(pod) {
374+
readyPods.Insert(pod.Name)
375+
}
376+
}
377+
if !readyPods.HasAll(oldReadyPods.UnsortedList()...) {
378+
t.Errorf("pods have changed in %d-th iteration: %s", i,
379+
strings.Join(oldReadyPods.Difference(readyPods).UnsortedList(), ", "))
380+
}
381+
}
382+
383+
maxUnavailable = 11
384+
intStr = intstr.FromInt(maxUnavailable)
385+
ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
386+
ds.Spec.Template.Spec.Containers[0].Image = "foo2/bar4"
387+
err = manager.dsStore.Update(ds)
388+
if err != nil {
389+
t.Fatal(err)
390+
}
391+
392+
clearExpectations(t, manager, ds, podControl)
393+
expectSyncDaemonSets(t, manager, ds, podControl, 0, maxUnavailable, 0)
394+
395+
clearExpectations(t, manager, ds, podControl)
396+
expectSyncDaemonSets(t, manager, ds, podControl, maxUnavailable, 0, 0)
397+
398+
clearExpectations(t, manager, ds, podControl)
399+
expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
400+
clearExpectations(t, manager, ds, podControl)
401+
402+
// verify that the ready pods are never touched
403+
readyPods := sets.Set[string]{}
404+
for _, obj := range podControl.podStore.List() {
405+
pod := obj.(*v1.Pod)
406+
if podutil.IsPodReady(pod) {
407+
readyPods.Insert(pod.Name)
408+
}
409+
}
410+
if readyPods.Len() != 9 {
411+
t.Errorf("readyPods are different than expected, should be 9 but is %s", strings.Join(readyPods.UnsortedList(), ", "))
412+
}
413+
}
414+
415+
func TestDaemonSetUpdatesAllOldNotReadyPodsAndNewNotReadyPods(t *testing.T) {
416+
_, ctx := ktesting.NewTestContext(t)
417+
ds := newDaemonSet("foo")
418+
manager, podControl, _, err := newTestController(ctx, ds)
419+
if err != nil {
420+
t.Fatalf("error creating DaemonSets controller: %v", err)
421+
}
422+
addNodes(manager.nodeStore, 0, 100, nil)
423+
err = manager.dsStore.Add(ds)
424+
if err != nil {
425+
t.Fatal(err)
426+
}
427+
expectSyncDaemonSets(t, manager, ds, podControl, 100, 0, 0)
428+
markPodsReady(podControl.podStore)
429+
var hash1 string
430+
// at this point we have 100 pods that belong to the daemonset,
431+
// and we mark the controller revision which will be used later on to fake old pods
432+
for _, obj := range podControl.podStore.List() {
433+
pod := obj.(*v1.Pod)
434+
hash1 = pod.Labels[apps.ControllerRevisionHashLabelKey]
435+
break
436+
}
437+
438+
ds.Spec.Template.Spec.Containers[0].Image = "foo2/bar2"
439+
ds.Spec.UpdateStrategy.Type = apps.RollingUpdateDaemonSetStrategyType
440+
maxUnavailable := 10
441+
intStr := intstr.FromInt(maxUnavailable)
442+
ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
443+
err = manager.dsStore.Update(ds)
444+
if err != nil {
445+
t.Fatal(err)
446+
}
447+
// we need to iterate 10 times, since we allow 10 max unavailable, to reach 100 nodes rollout
448+
for i := 0; i < 10; i++ {
449+
clearExpectations(t, manager, ds, podControl)
450+
expectSyncDaemonSets(t, manager, ds, podControl, 0, maxUnavailable, 0)
451+
452+
clearExpectations(t, manager, ds, podControl)
453+
expectSyncDaemonSets(t, manager, ds, podControl, maxUnavailable, 0, 0)
454+
// make sure to mark the pods ready, otherwise the followup rollouts will fail
455+
markPodsReady(podControl.podStore)
456+
}
457+
458+
clearExpectations(t, manager, ds, podControl)
459+
expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
460+
clearExpectations(t, manager, ds, podControl)
461+
462+
// to reach the following situation
463+
// - maxUnavailable 10
464+
// - 88 unavailable new pods
465+
// - 2 unavailable old pods
466+
// - 10 available old pods
467+
oldUnavailablePods := sets.Set[string]{}
468+
for i, obj := range podControl.podStore.List() {
469+
pod := obj.(*v1.Pod)
470+
// mark the latter 90 pods not ready
471+
if i >= 10 {
472+
condition := v1.PodCondition{Type: v1.PodReady, Status: v1.ConditionFalse}
473+
podutil.UpdatePodCondition(&pod.Status, &condition)
474+
}
475+
// mark the first 12 pods with older hash
476+
if i < 12 {
477+
pod.Labels[apps.ControllerRevisionHashLabelKey] = hash1
478+
// note down 2 not available old pods
479+
if i >= 10 {
480+
oldUnavailablePods.Insert(pod.Name)
481+
}
482+
}
483+
}
484+
485+
clearExpectations(t, manager, ds, podControl)
486+
t.Logf("expect 2 old pods deletion in 1st iteration")
487+
expectSyncDaemonSets(t, manager, ds, podControl, 0, 2, 0)
488+
489+
clearExpectations(t, manager, ds, podControl)
490+
t.Logf("expect 2 new pods creation in 2nd iteration")
491+
expectSyncDaemonSets(t, manager, ds, podControl, 2, 0, 0)
492+
493+
clearExpectations(t, manager, ds, podControl)
494+
t.Logf("expect no modifications in 3rd iteration")
495+
expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
496+
clearExpectations(t, manager, ds, podControl)
497+
498+
// check if oldUnavailablePods were replaced
499+
t.Logf("Looking for old pods %s", strings.Join(oldUnavailablePods.UnsortedList(), ", "))
500+
notUpdatedOldPods := sets.Set[string]{}
501+
for _, obj := range podControl.podStore.List() {
502+
pod := obj.(*v1.Pod)
503+
if oldUnavailablePods.Has(pod.Name) {
504+
notUpdatedOldPods.Insert(pod.Name)
505+
}
506+
}
507+
if notUpdatedOldPods.Len() > 0 {
508+
t.Fatalf("found not updated old pods: %s", strings.Join(notUpdatedOldPods.UnsortedList(), ", "))
509+
}
510+
}
297511

298512
func TestDaemonSetUpdatesAllOldPodsNotReady(t *testing.T) {
299513
_, ctx := ktesting.NewTestContext(t)

0 commit comments

Comments
 (0)