
Commit 08aefc8

Merge pull request kubernetes#119362 from pacoxu/add-new-eviction-pid-test
add new e2e test with PodAndContainerStatsFromCRI enabled for pid eviction order
2 parents a45742b + 3a21a03
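
In short: this change parameterizes the existing PriorityPidEvictionOrdering node e2e test over the PodAndContainerStatsFromCRI feature gate, so the same PID-pressure eviction scenario now runs twice: once with pod PID counts read from the CRI runtime, and once via the default cadvisor path.

The test provokes PID pressure by setting a hard eviction threshold just below the node's current PID headroom. Below is a minimal sketch of that arithmetic with a hypothetical helper name and made-up numbers; the real computation lives inline in the diff that follows.

package main

import "fmt"

// pidAvailableThreshold mirrors the test's threshold computation: take the
// node's free PID headroom and subtract the ~10000 PIDs the test plans to
// consume, so fork-bomb pods spawning 30000 processes reliably trip the
// pid.available eviction signal.
func pidAvailableThreshold(maxPID, runningProcesses, pidsConsumed int64) string {
	availablePids := maxPID - runningProcesses
	return fmt.Sprintf("%d", availablePids-pidsConsumed)
}

func main() {
	// Illustrative numbers only: pid_max of 4194304 with ~500 processes
	// already running yields a threshold of 4183804 available PIDs.
	fmt.Println(pidAvailableThreshold(4194304, 500, 10000))
}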

Showing 1 changed file with 44 additions and 34 deletions.

test/e2e_node/eviction_test.go

Lines changed: 44 additions & 34 deletions
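Reading the hunk below: the body of the old ginkgo.Context is unchanged in substance. It is re-indented one level inside for _, criStats := range []bool{true, false}, the Context description now names the gate setting, and the kubelet config callback gains a block that populates initialConfig.FeatureGates when criStats is true. Most of the +44/−34 churn is that re-indentation.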
@@ -477,42 +477,52 @@ var _ = SIGDescribe("PriorityPidEvictionOrdering", framework.WithSlow(), framewo
 	highPriority := int32(999999999)
 	processes := 30000
 
-	ginkgo.Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() {
-		tempSetCurrentKubeletConfig(f, func(ctx context.Context, initialConfig *kubeletconfig.KubeletConfiguration) {
-			pidsConsumed := int64(10000)
-			summary := eventuallyGetSummary(ctx)
-			availablePids := *(summary.Node.Rlimit.MaxPID) - *(summary.Node.Rlimit.NumOfRunningProcesses)
-			initialConfig.EvictionHard = map[string]string{string(evictionapi.SignalPIDAvailable): fmt.Sprintf("%d", availablePids-pidsConsumed)}
-			initialConfig.EvictionMinimumReclaim = map[string]string{}
-		})
-		ginkgo.BeforeEach(func(ctx context.Context) {
-			_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(ctx, &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority}, metav1.CreateOptions{})
-			if err != nil && !apierrors.IsAlreadyExists(err) {
-				framework.ExpectNoError(err, "failed to create priority class")
+	// if criStats is true, PodAndContainerStatsFromCRI will use data from cri instead of cadvisor for kubelet to get pid count of pods
+	for _, criStats := range []bool{true, false} {
+		ginkgo.Context(fmt.Sprintf("when we run containers with PodAndContainerStatsFromCRI=%v that should cause %s", criStats, expectedNodeCondition), func() {
+			tempSetCurrentKubeletConfig(f, func(ctx context.Context, initialConfig *kubeletconfig.KubeletConfiguration) {
+				pidsConsumed := int64(10000)
+				summary := eventuallyGetSummary(ctx)
+				availablePids := *(summary.Node.Rlimit.MaxPID) - *(summary.Node.Rlimit.NumOfRunningProcesses)
+				initialConfig.EvictionHard = map[string]string{string(evictionapi.SignalPIDAvailable): fmt.Sprintf("%d", availablePids-pidsConsumed)}
+				initialConfig.EvictionMinimumReclaim = map[string]string{}
+				if initialConfig.FeatureGates == nil {
+					initialConfig.FeatureGates = make(map[string]bool)
+				}
+				if criStats {
+					initialConfig.FeatureGates["PodAndContainerStatsFromCRI"] = true
+				}
+
+			})
+			ginkgo.BeforeEach(func(ctx context.Context) {
+				_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(ctx, &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority}, metav1.CreateOptions{})
+				if err != nil && !apierrors.IsAlreadyExists(err) {
+					framework.ExpectNoError(err, "failed to create priority class")
+				}
+			})
+			ginkgo.AfterEach(func(ctx context.Context) {
+				err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(ctx, highPriorityClassName, metav1.DeleteOptions{})
+				framework.ExpectNoError(err)
+			})
+			specs := []podEvictSpec{
+				{
+					evictionPriority: 2,
+					pod:              pidConsumingPod("fork-bomb-container-with-low-priority", processes),
+				},
+				{
+					evictionPriority: 0,
+					pod:              innocentPod(),
+				},
+				{
+					evictionPriority: 1,
+					pod:              pidConsumingPod("fork-bomb-container-with-high-priority", processes),
+				},
 			}
+			specs[1].pod.Spec.PriorityClassName = highPriorityClassName
+			specs[2].pod.Spec.PriorityClassName = highPriorityClassName
+			runEvictionTest(f, pressureTimeout, expectedNodeCondition, expectedStarvedResource, logPidMetrics, specs)
 		})
-		ginkgo.AfterEach(func(ctx context.Context) {
-			err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(ctx, highPriorityClassName, metav1.DeleteOptions{})
-			framework.ExpectNoError(err)
-		})
-		specs := []podEvictSpec{
-			{
-				evictionPriority: 2,
-				pod:              pidConsumingPod("fork-bomb-container-with-low-priority", processes),
-			},
-			{
-				evictionPriority: 0,
-				pod:              innocentPod(),
-			},
-			{
-				evictionPriority: 1,
-				pod:              pidConsumingPod("fork-bomb-container-with-high-priority", processes),
-			},
-		}
-		specs[1].pod.Spec.PriorityClassName = highPriorityClassName
-		specs[2].pod.Spec.PriorityClassName = highPriorityClassName
-		runEvictionTest(f, pressureTimeout, expectedNodeCondition, expectedStarvedResource, logPidMetrics, specs)
-	})
+	}
 
 	f.Context(fmt.Sprintf(testContextFmt, expectedNodeCondition)+"; baseline scenario to verify DisruptionTarget is added", func() {
 		tempSetCurrentKubeletConfig(f, func(ctx context.Context, initialConfig *kubeletconfig.KubeletConfiguration) {
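
A note on the spec table, for readers outside this suite: a podEvictSpec with evictionPriority 2 is expected to be evicted before one with evictionPriority 1, and evictionPriority 0 marks a pod that must survive the pressure; the two PriorityClassName assignments after the table are what let the high-priority fork bomb outlive the low-priority one even though both consume the same number of PIDs. To try the new cases locally, the usual node e2e entry point should work, roughly as follows (exact flags depend on your environment, so treat this as a sketch rather than a guaranteed invocation):

make test-e2e-node FOCUS="PriorityPidEvictionOrdering"

Both generated Context blocks match that focus string, so this exercises the scenario with PodAndContainerStatsFromCRI both on and off.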
