Skip to content

Commit 8095ea3

Browse files
authored
Merge pull request kubernetes#120679 from kannon92/fix-eviction-e2e-crio
Potential Fix for Eviction Tests
2 parents 3cb3e8b + 1ae5429 commit 8095ea3

File tree

1 file changed

+26
-8
lines changed

1 file changed

+26
-8
lines changed

test/e2e_node/eviction_test.go

Lines changed: 26 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -467,7 +467,7 @@ var _ = SIGDescribe("PriorityLocalStorageEvictionOrdering [Slow] [Serial] [Disru
467467
var _ = SIGDescribe("PriorityPidEvictionOrdering [Slow] [Serial] [Disruptive][NodeFeature:Eviction]", func() {
468468
f := framework.NewDefaultFramework("pidpressure-eviction-test")
469469
f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
470-
pressureTimeout := 3 * time.Minute
470+
pressureTimeout := 10 * time.Minute
471471
expectedNodeCondition := v1.NodePIDPressure
472472
expectedStarvedResource := noStarvedResource
473473

@@ -717,7 +717,8 @@ func verifyEvictionOrdering(ctx context.Context, f *framework.Framework, testSpe
717717
}
718718
}
719719
gomega.Expect(priorityPod).NotTo(gomega.BeNil())
720-
gomega.Expect(priorityPod.Status.Phase).ToNot(gomega.Equal(v1.PodSucceeded), "pod: %s succeeded unexpectedly", priorityPod.Name)
720+
gomega.Expect(priorityPod.Status.Phase).ToNot(gomega.Equal(v1.PodSucceeded),
721+
fmt.Sprintf("pod: %s succeeded unexpectedly", priorityPod.Name))
721722

722723
// Check eviction ordering.
723724
// Note: it is alright for a priority 1 and priority 2 pod (for example) to fail in the same round,
@@ -731,8 +732,9 @@ func verifyEvictionOrdering(ctx context.Context, f *framework.Framework, testSpe
731732
}
732733
gomega.Expect(lowPriorityPod).NotTo(gomega.BeNil())
733734
if priorityPodSpec.evictionPriority < lowPriorityPodSpec.evictionPriority && lowPriorityPod.Status.Phase == v1.PodRunning {
734-
gomega.Expect(priorityPod.Status.Phase).ToNot(gomega.Equal(v1.PodFailed), "priority %d pod: %s failed before priority %d pod: %s",
735-
priorityPodSpec.evictionPriority, priorityPodSpec.pod.Name, lowPriorityPodSpec.evictionPriority, lowPriorityPodSpec.pod.Name)
735+
gomega.Expect(priorityPod.Status.Phase).ToNot(gomega.Equal(v1.PodFailed),
736+
fmt.Sprintf("priority %d pod: %s failed before priority %d pod: %s",
737+
priorityPodSpec.evictionPriority, priorityPodSpec.pod.Name, lowPriorityPodSpec.evictionPriority, lowPriorityPodSpec.pod.Name))
736738
}
737739
}
738740

@@ -743,7 +745,8 @@ func verifyEvictionOrdering(ctx context.Context, f *framework.Framework, testSpe
743745

744746
// EvictionPriority 0 pods should not fail
745747
if priorityPodSpec.evictionPriority == 0 {
746-
gomega.Expect(priorityPod.Status.Phase).ToNot(gomega.Equal(v1.PodFailed), "priority 0 pod: %s failed", priorityPod.Name)
748+
gomega.Expect(priorityPod.Status.Phase).ToNot(gomega.Equal(v1.PodFailed),
749+
fmt.Sprintf("priority 0 pod: %s failed", priorityPod.Name))
747750
}
748751

749752
// If a pod that is not evictionPriority 0 has not been evicted, we are not done
@@ -947,10 +950,15 @@ func eventuallyGetSummary(ctx context.Context) (s *kubeletstatsv1alpha1.Summary)
947950

948951
// returns a pod that does not use any resources
949952
func innocentPod() *v1.Pod {
953+
// Due to https://github.com/kubernetes/kubernetes/issues/115819,
954+
// When evictionHard is used, we were setting the grace period to 0, which meant the default setting (30 seconds)
955+
// This could help with flakiness as we should send SIGTERM right away.
956+
var gracePeriod int64 = 1
950957
return &v1.Pod{
951958
ObjectMeta: metav1.ObjectMeta{Name: "innocent-pod"},
952959
Spec: v1.PodSpec{
953-
RestartPolicy: v1.RestartPolicyNever,
960+
RestartPolicy: v1.RestartPolicyNever,
961+
TerminationGracePeriodSeconds: &gracePeriod,
954962
Containers: []v1.Container{
955963
{
956964
Image: busyboxImage,
@@ -996,6 +1004,10 @@ func pidConsumingPod(name string, numProcesses int) *v1.Pod {
9961004

9971005
// podWithCommand returns a pod with the provided volumeSource and resourceRequirements.
9981006
func podWithCommand(volumeSource *v1.VolumeSource, resources v1.ResourceRequirements, iterations int, name, command string) *v1.Pod {
1007+
// Due to https://github.com/kubernetes/kubernetes/issues/115819,
1008+
// When evictionHard is used, we were setting the grace period to 0, which meant the default setting (30 seconds)
1009+
// This could help with flakiness as we should send SIGTERM right away.
1010+
var gracePeriod int64 = 1
9991011
volumeMounts := []v1.VolumeMount{}
10001012
volumes := []v1.Volume{}
10011013
if volumeSource != nil {
@@ -1005,7 +1017,8 @@ func podWithCommand(volumeSource *v1.VolumeSource, resources v1.ResourceRequirem
10051017
return &v1.Pod{
10061018
ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("%s-pod", name)},
10071019
Spec: v1.PodSpec{
1008-
RestartPolicy: v1.RestartPolicyNever,
1020+
RestartPolicy: v1.RestartPolicyNever,
1021+
TerminationGracePeriodSeconds: &gracePeriod,
10091022
Containers: []v1.Container{
10101023
{
10111024
Image: busyboxImage,
@@ -1025,6 +1038,10 @@ func podWithCommand(volumeSource *v1.VolumeSource, resources v1.ResourceRequirem
10251038
}
10261039

10271040
func getMemhogPod(podName string, ctnName string, res v1.ResourceRequirements) *v1.Pod {
1041+
// Due to https://github.com/kubernetes/kubernetes/issues/115819,
1042+
// When evictionHard is used, we were setting the grace period to 0, which meant the default setting (30 seconds)
1043+
// This could help with flakiness as we should send SIGTERM right away.
1044+
var gracePeriod int64 = 1
10281045
env := []v1.EnvVar{
10291046
{
10301047
Name: "MEMORY_LIMIT",
@@ -1053,7 +1070,8 @@ func getMemhogPod(podName string, ctnName string, res v1.ResourceRequirements) *
10531070
Name: podName,
10541071
},
10551072
Spec: v1.PodSpec{
1056-
RestartPolicy: v1.RestartPolicyNever,
1073+
RestartPolicy: v1.RestartPolicyNever,
1074+
TerminationGracePeriodSeconds: &gracePeriod,
10571075
Containers: []v1.Container{
10581076
{
10591077
Name: ctnName,

0 commit comments

Comments
 (0)