@@ -248,6 +248,44 @@ var _ = SIGDescribe("LocalStorageSoftEviction", framework.WithSlow(), framework.
	})
})

+var _ = SIGDescribe("LocalStorageSoftEvictionNotOverwriteTerminationGracePeriodSeconds", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), nodefeature.Eviction, func() {
+	f := framework.NewDefaultFramework("localstorage-eviction-test")
+	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
+	pressureTimeout := 10 * time.Minute
+	expectedNodeCondition := v1.NodeDiskPressure
+	expectedStarvedResource := v1.ResourceEphemeralStorage
+
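+	// These match the kubelet eviction settings applied below: the 30s soft-eviction
+	// grace period and the 30s eviction-max-pod-grace-period.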
+	evictionMaxPodGracePeriod := 30
+	evictionSoftGracePeriod := 30
+	ginkgo.Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() {
+		tempSetCurrentKubeletConfig(f, func(ctx context.Context, initialConfig *kubeletconfig.KubeletConfiguration) {
+			diskConsumed := resource.MustParse("4Gi")
+			summary := eventuallyGetSummary(ctx)
+			availableBytes := *(summary.Node.Fs.AvailableBytes)
+			if availableBytes <= uint64(diskConsumed.Value()) {
+				e2eskipper.Skipf("Too little disk free on the host for the LocalStorageSoftEviction test to run")
+			}
+			initialConfig.EvictionSoft = map[string]string{string(evictionapi.SignalNodeFsAvailable): fmt.Sprintf("%d", availableBytes-uint64(diskConsumed.Value()))}
+			initialConfig.EvictionSoftGracePeriod = map[string]string{string(evictionapi.SignalNodeFsAvailable): "30s"}
+			// Defer to the pod default grace period
+			initialConfig.EvictionMaxPodGracePeriod = int32(evictionMaxPodGracePeriod)
+			initialConfig.EvictionMinimumReclaim = map[string]string{}
+			// Ensure that pods are not evicted because of the eviction-hard threshold
+			// setting a threshold to 0% disables; non-empty map overrides default value (necessary due to omitempty)
+			initialConfig.EvictionHard = map[string]string{string(evictionapi.SignalMemoryAvailable): "0%"}
+		})
+
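+		// Record the configured grace periods on the spec so the eviction is verified
+		// to happen within the expected window, not merely to happen at all.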
+		runEvictionTest(f, pressureTimeout, expectedNodeCondition, expectedStarvedResource, logDiskMetrics, []podEvictSpec{
+			{
+				evictionMaxPodGracePeriod: evictionMaxPodGracePeriod,
+				evictionSoftGracePeriod:   evictionSoftGracePeriod,
+				evictionPriority:          1,
+				pod:                       diskConsumingPod("container-disk-hog", lotsOfDisk, nil, v1.ResourceRequirements{}),
+			},
+		})
+	})
+})
+
// This test validates that in-memory EmptyDir's are evicted when the Kubelet does
// not have Sized Memory Volumes enabled. When Sized volumes are enabled, it's
// not possible to exhaust the quota.
@@ -551,6 +589,9 @@ type podEvictSpec struct {
	evictionPriority           int
	pod                        *v1.Pod
	wantPodDisruptionCondition *v1.PodConditionType
+
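+	// Expected soft-eviction timing for this pod; when both fields are zero,
+	// verifyEvictionPeriod skips the timing check for this spec.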
+	evictionMaxPodGracePeriod int
+	evictionSoftGracePeriod   int
}

// runEvictionTest sets up a testing environment given the provided pods, and checks a few things:
@@ -589,16 +630,21 @@ func runEvictionTest(f *framework.Framework, pressureTimeout time.Duration, expe
		}, pressureTimeout, evictionPollInterval).Should(gomega.BeNil())

		ginkgo.By("Waiting for evictions to occur")
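+		// Baseline for the eviction-timing check: reset to the last moment the node was
+		// observed without the expected pressure condition.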
+		nodeUnreadyTime := time.Now()
+
		gomega.Eventually(ctx, func(ctx context.Context) error {
			if expectedNodeCondition != noPressure {
				if hasNodeCondition(ctx, f, expectedNodeCondition) {
					framework.Logf("Node has %s", expectedNodeCondition)
				} else {
					framework.Logf("Node does NOT have %s", expectedNodeCondition)
+					nodeUnreadyTime = time.Now()
				}
			}
			logKubeletLatencyMetrics(ctx, kubeletmetrics.EvictionStatsAgeKey)
			logFunc(ctx)
+
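+			// Fail fast if a pod with timing expectations has outlived its allowed eviction window.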
+			verifyEvictionPeriod(ctx, f, testSpecs, nodeUnreadyTime)
			return verifyEvictionOrdering(ctx, f, testSpecs)
		}, pressureTimeout, evictionPollInterval).Should(gomega.Succeed())

@@ -770,6 +816,28 @@ func verifyEvictionOrdering(ctx context.Context, f *framework.Framework, testSpe
	return fmt.Errorf("pods that should be evicted are still running: %#v", pendingPods)
}

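+// verifyEvictionPeriod checks, for every pod with soft-eviction timing expectations,
+// that the eviction (PodFailed) is observed no later than the allowed window plus a
+// 15-second buffer after nodeUnreadyTime, the last time the node was seen without the
+// expected pressure condition.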
+func verifyEvictionPeriod(ctx context.Context, f *framework.Framework, testSpecs []podEvictSpec, nodeUnreadyTime time.Time) {
+	for i, spec := range testSpecs {
+		if spec.evictionMaxPodGracePeriod == 0 && spec.evictionSoftGracePeriod == 0 {
+			continue
+		}
+		softEvictionPeriod := spec.evictionMaxPodGracePeriod + spec.evictionSoftGracePeriod
+
+		pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, spec.pod.Name, metav1.GetOptions{})
+		framework.ExpectNoError(err, "failed to get the latest pod object for name: %q", spec.pod.Name)
+
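+		// Tightest acceptable bound: the smaller of (soft grace + eviction max pod grace)
+		// and (soft grace + the pod's own terminationGracePeriodSeconds).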
+		minSoftEvictionPeriod := min(float64(softEvictionPeriod), float64(*spec.pod.Spec.TerminationGracePeriodSeconds+int64(spec.evictionSoftGracePeriod)))
+		if pod.Status.Phase == v1.PodFailed {
+			if time.Since(nodeUnreadyTime).Seconds() > minSoftEvictionPeriod+15 {
+				framework.Failf("pod %s should be evicted within %f seconds, but its eviction was only observed after %f seconds", pod.Name, minSoftEvictionPeriod, time.Since(nodeUnreadyTime).Seconds())
+			} else {
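+				// Evicted within the window; zero the expectations so later polls skip this spec.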
+				testSpecs[i].evictionMaxPodGracePeriod = 0
+				testSpecs[i].evictionSoftGracePeriod = 0
+			}
+		}
+	}
+}
+
func verifyPodConditions(ctx context.Context, f *framework.Framework, testSpecs []podEvictSpec) {
	for _, spec := range testSpecs {
		if spec.wantPodDisruptionCondition != nil {