Commit 9d4e272

add e2e test for pod grace period being overridden
1 parent 7c85784 commit 9d4e272

File tree

1 file changed: +68 -0

test/e2e_node/eviction_test.go

Lines changed: 68 additions & 0 deletions
@@ -248,6 +248,44 @@ var _ = SIGDescribe("LocalStorageSoftEviction", framework.WithSlow(), framework.
 	})
 })
 
+var _ = SIGDescribe("LocalStorageSoftEvictionNotOverwriteTerminationGracePeriodSeconds", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), nodefeature.Eviction, func() {
+	f := framework.NewDefaultFramework("localstorage-eviction-test")
+	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
+	pressureTimeout := 10 * time.Minute
+	expectedNodeCondition := v1.NodeDiskPressure
+	expectedStarvedResource := v1.ResourceEphemeralStorage
+
+	evictionMaxPodGracePeriod := 30
+	evictionSoftGracePeriod := 30
+	ginkgo.Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() {
+		tempSetCurrentKubeletConfig(f, func(ctx context.Context, initialConfig *kubeletconfig.KubeletConfiguration) {
+			diskConsumed := resource.MustParse("4Gi")
+			summary := eventuallyGetSummary(ctx)
+			availableBytes := *(summary.Node.Fs.AvailableBytes)
+			if availableBytes <= uint64(diskConsumed.Value()) {
+				e2eskipper.Skipf("Too little disk free on the host for the LocalStorageSoftEviction test to run")
+			}
+			initialConfig.EvictionSoft = map[string]string{string(evictionapi.SignalNodeFsAvailable): fmt.Sprintf("%d", availableBytes-uint64(diskConsumed.Value()))}
+			initialConfig.EvictionSoftGracePeriod = map[string]string{string(evictionapi.SignalNodeFsAvailable): "30s"}
+			// Defer to the pod default grace period
+			initialConfig.EvictionMaxPodGracePeriod = int32(evictionMaxPodGracePeriod)
+			initialConfig.EvictionMinimumReclaim = map[string]string{}
+			// Ensure that pods are not evicted because of the eviction-hard threshold
+			// setting a threshold to 0% disables; non-empty map overrides default value (necessary due to omitempty)
+			initialConfig.EvictionHard = map[string]string{string(evictionapi.SignalMemoryAvailable): "0%"}
+		})
+
+		runEvictionTest(f, pressureTimeout, expectedNodeCondition, expectedStarvedResource, logDiskMetrics, []podEvictSpec{
+			{
+				evictionMaxPodGracePeriod: evictionSoftGracePeriod,
+				evictionSoftGracePeriod: evictionMaxPodGracePeriod,
+				evictionPriority: 1,
+				pod: diskConsumingPod("container-disk-hog", lotsOfDisk, nil, v1.ResourceRequirements{}),
+			},
+		})
+	})
+})
+
 // This test validates that in-memory EmptyDir's are evicted when the Kubelet does
 // not have Sized Memory Volumes enabled. When Sized volumes are enabled, it's
 // not possible to exhaust the quota.
@@ -551,6 +589,9 @@ type podEvictSpec struct {
 	evictionPriority int
 	pod *v1.Pod
 	wantPodDisruptionCondition *v1.PodConditionType
+
+	evictionMaxPodGracePeriod int
+	evictionSoftGracePeriod int
 }
 
 // runEvictionTest sets up a testing environment given the provided pods, and checks a few things:
@@ -589,16 +630,21 @@ func runEvictionTest(f *framework.Framework, pressureTimeout time.Duration, expe
 		}, pressureTimeout, evictionPollInterval).Should(gomega.BeNil())
 
 		ginkgo.By("Waiting for evictions to occur")
+		nodeUnreadyTime := time.Now()
+
 		gomega.Eventually(ctx, func(ctx context.Context) error {
 			if expectedNodeCondition != noPressure {
 				if hasNodeCondition(ctx, f, expectedNodeCondition) {
 					framework.Logf("Node has %s", expectedNodeCondition)
 				} else {
 					framework.Logf("Node does NOT have %s", expectedNodeCondition)
+					nodeUnreadyTime = time.Now()
 				}
 			}
 			logKubeletLatencyMetrics(ctx, kubeletmetrics.EvictionStatsAgeKey)
 			logFunc(ctx)
+
+			verifyEvictionPeriod(ctx, f, testSpecs, nodeUnreadyTime)
 			return verifyEvictionOrdering(ctx, f, testSpecs)
 		}, pressureTimeout, evictionPollInterval).Should(gomega.Succeed())
 
@@ -770,6 +816,28 @@ func verifyEvictionOrdering(ctx context.Context, f *framework.Framework, testSpe
 	return fmt.Errorf("pods that should be evicted are still running: %#v", pendingPods)
 }
 
+func verifyEvictionPeriod(ctx context.Context, f *framework.Framework, testSpecs []podEvictSpec, nodeUnreadyTime time.Time) {
+	for i, spec := range testSpecs {
+		if spec.evictionMaxPodGracePeriod == 0 && spec.evictionSoftGracePeriod == 0 {
+			continue
+		}
+		softEvictionPeriod := spec.evictionMaxPodGracePeriod + spec.evictionSoftGracePeriod
+
+		pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, spec.pod.Name, metav1.GetOptions{})
+		framework.ExpectNoError(err, "Failed to get the recent pod object for name: %q", pod.Name)
+
+		minSoftEvictionPeriod := min(float64(softEvictionPeriod), float64(*spec.pod.Spec.TerminationGracePeriodSeconds+int64(spec.evictionSoftGracePeriod)))
+		if pod.Status.Phase == v1.PodFailed {
+			if time.Since(nodeUnreadyTime).Seconds() > minSoftEvictionPeriod+15 {
+				framework.Failf("pod %s should be evicted within %f seconds, but it has not been evicted for %f seconds.", pod.Name, minSoftEvictionPeriod, time.Since(nodeUnreadyTime).Seconds())
+			} else {
+				testSpecs[i].evictionMaxPodGracePeriod = 0
+				testSpecs[i].evictionSoftGracePeriod = 0
+			}
+		}
+	}
+}
+
 func verifyPodConditions(ctx context.Context, f *framework.Framework, testSpecs []podEvictSpec) {
 	for _, spec := range testSpecs {
 		if spec.wantPodDisruptionCondition != nil {
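
For context on the check added in verifyEvictionPeriod, the standalone sketch below (not part of the commit) reproduces the deadline it enforces. The 30-second values mirror the new test's evictionSoftGracePeriod and evictionMaxPodGracePeriod; the 30-second terminationGracePeriodSeconds is an assumption based on the Kubernetes default, since the test pod does not set one explicitly.

// Sketch only: illustrates the eviction deadline verifyEvictionPeriod checks against.
// Requires Go 1.21+ for the built-in min.
package main

import "fmt"

func main() {
	evictionSoftGracePeriod := 30.0       // kubelet eviction-soft grace period for nodefs.available
	evictionMaxPodGracePeriod := 30.0     // kubelet eviction-max-pod-grace-period
	terminationGracePeriodSeconds := 30.0 // assumed pod default; the test pod does not override it

	// The test expects the pod to reach PodFailed within
	// min(evictionMaxPodGracePeriod+evictionSoftGracePeriod,
	//     terminationGracePeriodSeconds+evictionSoftGracePeriod),
	// measured from roughly when node disk pressure appeared, plus a 15s buffer.
	softEvictionPeriod := evictionMaxPodGracePeriod + evictionSoftGracePeriod
	bound := min(softEvictionPeriod, terminationGracePeriodSeconds+evictionSoftGracePeriod)

	fmt.Printf("pod must be evicted within %.0f seconds (plus the test's 15s buffer)\n", bound) // prints 60
}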
