@@ -467,7 +467,7 @@ var _ = SIGDescribe("PriorityLocalStorageEvictionOrdering [Slow] [Serial] [Disru
 var _ = SIGDescribe("PriorityPidEvictionOrdering [Slow] [Serial] [Disruptive][NodeFeature:Eviction]", func() {
 	f := framework.NewDefaultFramework("pidpressure-eviction-test")
 	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
-	pressureTimeout := 3 * time.Minute
+	pressureTimeout := 10 * time.Minute
 	expectedNodeCondition := v1.NodePIDPressure
 	expectedStarvedResource := noStarvedResource
 
@@ -717,7 +717,8 @@ func verifyEvictionOrdering(ctx context.Context, f *framework.Framework, testSpe
 			}
 		}
 		gomega.Expect(priorityPod).NotTo(gomega.BeNil())
-		gomega.Expect(priorityPod.Status.Phase).ToNot(gomega.Equal(v1.PodSucceeded), "pod: %s succeeded unexpectedly", priorityPod.Name)
+		gomega.Expect(priorityPod.Status.Phase).ToNot(gomega.Equal(v1.PodSucceeded),
+			fmt.Sprintf("pod: %s succeeded unexpectedly", priorityPod.Name))
 
 		// Check eviction ordering.
 		// Note: it is alright for a priority 1 and priority 2 pod (for example) to fail in the same round,
@@ -731,8 +732,9 @@ func verifyEvictionOrdering(ctx context.Context, f *framework.Framework, testSpe
 			}
 			gomega.Expect(lowPriorityPod).NotTo(gomega.BeNil())
 			if priorityPodSpec.evictionPriority < lowPriorityPodSpec.evictionPriority && lowPriorityPod.Status.Phase == v1.PodRunning {
-				gomega.Expect(priorityPod.Status.Phase).ToNot(gomega.Equal(v1.PodFailed), "priority %d pod: %s failed before priority %d pod: %s",
-					priorityPodSpec.evictionPriority, priorityPodSpec.pod.Name, lowPriorityPodSpec.evictionPriority, lowPriorityPodSpec.pod.Name)
+				gomega.Expect(priorityPod.Status.Phase).ToNot(gomega.Equal(v1.PodFailed),
+					fmt.Sprintf("priority %d pod: %s failed before priority %d pod: %s",
+						priorityPodSpec.evictionPriority, priorityPodSpec.pod.Name, lowPriorityPodSpec.evictionPriority, lowPriorityPodSpec.pod.Name))
 			}
 		}
 
@@ -743,7 +745,8 @@ func verifyEvictionOrdering(ctx context.Context, f *framework.Framework, testSpe
 
 		// EvictionPriority 0 pods should not fail
 		if priorityPodSpec.evictionPriority == 0 {
-			gomega.Expect(priorityPod.Status.Phase).ToNot(gomega.Equal(v1.PodFailed), "priority 0 pod: %s failed", priorityPod.Name)
+			gomega.Expect(priorityPod.Status.Phase).ToNot(gomega.Equal(v1.PodFailed),
+				fmt.Sprintf("priority 0 pod: %s failed", priorityPod.Name))
 		}
 
 		// If a pod that is not evictionPriority 0 has not been evicted, we are not done
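Aside on the assertion rewrites in the three hunks above: gomega's trailing optionalDescription parameter accepts either a format string followed by arguments or a single pre-built string, so both the old and new forms compile and produce the same failure message; the fmt.Sprintf form simply builds the message eagerly on every call, while the format-args form is only formatted if the assertion fails. A minimal sketch, assuming a standalone test file outside this suite (the file and test names are hypothetical):

// assertion_styles_test.go — a minimal sketch, not part of this patch.
package eviction_test

import (
	"fmt"
	"testing"

	"github.com/onsi/gomega"
)

func TestAssertionMessageStyles(t *testing.T) {
	g := gomega.NewWithT(t)
	podName := "innocent-pod"
	phase := "Running"

	// Old style: format string plus args; gomega formats the message only on failure.
	g.Expect(phase).ToNot(gomega.Equal("Succeeded"), "pod: %s succeeded unexpectedly", podName)

	// New style used by this patch: the message is pre-built with fmt.Sprintf.
	g.Expect(phase).ToNot(gomega.Equal("Succeeded"),
		fmt.Sprintf("pod: %s succeeded unexpectedly", podName))
}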
@@ -947,10 +950,15 @@ func eventuallyGetSummary(ctx context.Context) (s *kubeletstatsv1alpha1.Summary)
 
 // returns a pod that does not use any resources
 func innocentPod() *v1.Pod {
+	// Due to https://github.com/kubernetes/kubernetes/issues/115819, when evictionHard
+	// is used, the grace period was set to 0, which meant the default setting (30 seconds) applied.
+	// A grace period of 1 should help with flakiness, as SIGTERM is then sent right away.
+	var gracePeriod int64 = 1
 	return &v1.Pod{
 		ObjectMeta: metav1.ObjectMeta{Name: "innocent-pod"},
 		Spec: v1.PodSpec{
-			RestartPolicy: v1.RestartPolicyNever,
+			RestartPolicy:                 v1.RestartPolicyNever,
+			TerminationGracePeriodSeconds: &gracePeriod,
 			Containers: []v1.Container{
 				{
 					Image: busyboxImage,
@@ -996,6 +1004,10 @@ func pidConsumingPod(name string, numProcesses int) *v1.Pod {
 
 // podWithCommand returns a pod with the provided volumeSource and resourceRequirements.
 func podWithCommand(volumeSource *v1.VolumeSource, resources v1.ResourceRequirements, iterations int, name, command string) *v1.Pod {
+	// Due to https://github.com/kubernetes/kubernetes/issues/115819, when evictionHard
+	// is used, the grace period was set to 0, which meant the default setting (30 seconds) applied.
+	// A grace period of 1 should help with flakiness, as SIGTERM is then sent right away.
+	var gracePeriod int64 = 1
 	volumeMounts := []v1.VolumeMount{}
 	volumes := []v1.Volume{}
 	if volumeSource != nil {
@@ -1005,7 +1017,8 @@ func podWithCommand(volumeSource *v1.VolumeSource, resources v1.ResourceRequirem
 	return &v1.Pod{
 		ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("%s-pod", name)},
 		Spec: v1.PodSpec{
-			RestartPolicy: v1.RestartPolicyNever,
+			RestartPolicy:                 v1.RestartPolicyNever,
+			TerminationGracePeriodSeconds: &gracePeriod,
 			Containers: []v1.Container{
 				{
 					Image: busyboxImage,
@@ -1025,6 +1038,10 @@ func podWithCommand(volumeSource *v1.VolumeSource, resources v1.ResourceRequirem
 }
 
 func getMemhogPod(podName string, ctnName string, res v1.ResourceRequirements) *v1.Pod {
+	// Due to https://github.com/kubernetes/kubernetes/issues/115819, when evictionHard
+	// is used, the grace period was set to 0, which meant the default setting (30 seconds) applied.
+	// A grace period of 1 should help with flakiness, as SIGTERM is then sent right away.
+	var gracePeriod int64 = 1
 	env := []v1.EnvVar{
 		{
 			Name: "MEMORY_LIMIT",
@@ -1053,7 +1070,8 @@ func getMemhogPod(podName string, ctnName string, res v1.ResourceRequirements) *
 			Name: podName,
 		},
 		Spec: v1.PodSpec{
-			RestartPolicy: v1.RestartPolicyNever,
+			RestartPolicy:                 v1.RestartPolicyNever,
+			TerminationGracePeriodSeconds: &gracePeriod,
 			Containers: []v1.Container{
 				{
 					Name: ctnName,
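The gracePeriod pattern repeated in the four pod-builder hunks above exists because PodSpec.TerminationGracePeriodSeconds is a *int64, and Go cannot take the address of a numeric literal, so each builder declares an addressable variable and stores its address in the spec. A minimal sketch of that pattern under those assumptions (the helper name below is hypothetical, not from this patch):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// podWithGracePeriod mirrors the pattern from the patch: declare an
// addressable int64 and point the pod spec at it.
func podWithGracePeriod(name string, seconds int64) *v1.Pod {
	gracePeriod := seconds
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		Spec: v1.PodSpec{
			RestartPolicy:                 v1.RestartPolicyNever,
			TerminationGracePeriodSeconds: &gracePeriod, // *int64 field: a bare literal would not compile here
		},
	}
}

func main() {
	pod := podWithGracePeriod("innocent-pod", 1)
	fmt.Println(pod.Name, *pod.Spec.TerminationGracePeriodSeconds)
}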