Skip to content

Commit f032068

Browse files
committed
Focus on restart numbers instead of timing
Signed-off-by: Laura Lorenz <[email protected]>
1 parent bad037b commit f032068

File tree

2 files changed

+24
-23
lines changed

2 files changed

+24
-23
lines changed

test/e2e/framework/pod/wait.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -871,7 +871,7 @@ func WaitForContainerTerminated(ctx context.Context, c clientset.Interface, name
871871

872872
// WaitForContainerRestartedNTimes waits for the given Pod container to have restarted N times
873873
func WaitForContainerRestartedNTimes(ctx context.Context, c clientset.Interface, namespace, podName, containerName string, timeout time.Duration, target int) error {
874-
conditionDesc := fmt.Sprintf("container %s restarted %d times", containerName, target)
874+
conditionDesc := fmt.Sprintf("container %s restarted at least %d times", containerName, target)
875875
return WaitForPodCondition(ctx, c, namespace, podName, conditionDesc, timeout, func(pod *v1.Pod) (bool, error) {
876876
for _, statuses := range [][]v1.ContainerStatus{pod.Status.ContainerStatuses, pod.Status.InitContainerStatuses, pod.Status.EphemeralContainerStatuses} {
877877
for _, cs := range statuses {

test/e2e_node/container_restart_test.go

Lines changed: 23 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -13,13 +13,14 @@ import (
1313
v1 "k8s.io/api/core/v1"
1414
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
1515
"k8s.io/apimachinery/pkg/util/uuid"
16-
kubeletevents "k8s.io/kubernetes/pkg/kubelet/events"
1716
"k8s.io/kubernetes/test/e2e/feature"
1817
"k8s.io/kubernetes/test/e2e/framework"
1918
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
2019
admissionapi "k8s.io/pod-security-admission/api"
2120
)
2221

22+
const containerName = "restarts"
23+
2324
var _ = SIGDescribe("Container Restart", feature.CriProxy, framework.WithSerial(), func() {
2425
f := framework.NewDefaultFramework("container-restart")
2526
f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
@@ -38,8 +39,8 @@ var _ = SIGDescribe("Container Restart", feature.CriProxy, framework.WithSerial(
3839
})
3940

4041
ginkgo.It("Container restart backs off.", func(ctx context.Context) {
41-
// 3 would take 10s best case, 6 would take 150s best case
42-
doTest(ctx, f, 5, time.Duration(80*time.Second), time.Duration(10*time.Second))
42+
// 0s, 0s, 10s, 30s, 70s, 150s, 310s
43+
doTest(ctx, f, 5, containerName, 7)
4344
})
4445
})
4546

@@ -62,41 +63,42 @@ var _ = SIGDescribe("Container Restart", feature.CriProxy, framework.WithSerial(
6263
})
6364

6465
ginkgo.It("Alternate restart backs off.", func(ctx context.Context) {
65-
doTest(ctx, f, 7, time.Duration(120*time.Second), time.Duration(10*time.Second))
66+
// 0s, 0s, 10s, 30s, 60s, 90s, 120s, 150s, 180s, 210s
67+
doTest(ctx, f, 7, containerName, 10)
6668
})
6769
})
6870
})
6971

70-
func doTest(ctx context.Context, f *framework.Framework, maxRestarts int, target time.Duration, threshold time.Duration) {
72+
func doTest(ctx context.Context, f *framework.Framework, targetRestarts int, containerName string, maxRestarts int) {
7173

7274
pod := e2epod.NewPodClient(f).Create(ctx, newFailAlwaysPod())
7375
podErr := e2epod.WaitForPodContainerToFail(ctx, f.ClientSet, f.Namespace.Name, pod.Name, 0, "CrashLoopBackOff", 1*time.Minute)
7476
gomega.Expect(podErr).To(gomega.HaveOccurred())
7577

76-
// Wait for 120s worth of backoffs to occur so we can confirm the backoff growth.
77-
podErr = e2epod.WaitForContainerRestartedNTimes(ctx, f.ClientSet, f.Namespace.Name, pod.Name, "restart", 150*time.Second, maxRestarts)
78+
// Wait for 150s worth of backoffs to occur so we can confirm the backoff growth.
79+
podErr = e2epod.WaitForContainerRestartedNTimes(ctx, f.ClientSet, f.Namespace.Name, pod.Name, "restart", 150*time.Second, targetRestarts)
7880
gomega.Expect(podErr).ShouldNot(gomega.HaveOccurred(), "Expected container to repeatedly back off container failures")
7981

80-
d, err := getContainerRetryDuration(ctx, f, pod.Name)
82+
r, err := extractObservedBackoff(ctx, f, pod.Name, containerName)
8183
framework.ExpectNoError(err)
8284

83-
gomega.Expect(d).Should(gomega.BeNumerically("~", target, threshold))
85+
gomega.Expect(r).Should(gomega.BeNumerically("<=", maxRestarts))
8486
}
8587

86-
func getContainerRetryDuration(ctx context.Context, f *framework.Framework, podName string) (time.Duration, error) {
87-
88-
var d time.Duration
89-
e, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).List(ctx, metav1.ListOptions{})
88+
func extractObservedBackoff(ctx context.Context, f *framework.Framework, podName string, containerName string) (int32, error) {
89+
var r int32
90+
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, podName, metav1.GetOptions{})
9091
if err != nil {
91-
return d, err
92+
return r, err
9293
}
93-
94-
for _, event := range e.Items {
95-
if event.InvolvedObject.Name == podName && event.Reason == kubeletevents.StartedContainer {
96-
return event.LastTimestamp.Time.Sub(event.FirstTimestamp.Time), nil
94+
for _, statuses := range [][]v1.ContainerStatus{pod.Status.ContainerStatuses, pod.Status.InitContainerStatuses, pod.Status.EphemeralContainerStatuses} {
95+
for _, cs := range statuses {
96+
if cs.Name == containerName {
97+
return r, nil
98+
}
9799
}
98100
}
99-
return d, nil
101+
return r, nil
100102
}
101103

102104
func newFailAlwaysPod() *v1.Pod {
@@ -108,10 +110,9 @@ func newFailAlwaysPod() *v1.Pod {
108110
Spec: v1.PodSpec{
109111
Containers: []v1.Container{
110112
{
111-
Name: "restart",
113+
Name: containerName,
112114
Image: imageutils.GetBusyBoxImageName(),
113-
ImagePullPolicy: v1.PullAlways,
114-
Command: []string{"exit 1"},
115+
ImagePullPolicy: v1.PullIfNotPresent,
115116
},
116117
},
117118
},

0 commit comments

Comments
 (0)