Commit b82260f

Merge pull request kubernetes#130391 from bart0sh/PR174-e2e_node-fix-eviction-kubetest2
e2e_node: fix ImageGCNoEviction test for kubetest2
2 parents: ef47225 + 4c0b24b (commit b82260f)

test/e2e_node/eviction_test.go

Lines changed: 30 additions & 28 deletions
@@ -103,8 +103,11 @@ var _ = SIGDescribe("InodeEviction", framework.WithSlow(), framework.WithSerial(
 	})
 })
 
-// ImageGCNoEviction tests that the node does not evict pods when inodes are consumed by images
-// Disk pressure is induced by pulling large images
+// ImageGCNoEviction tests that the eviction manager is able to prevent eviction
+// by reclaiming resources(inodes) through image garbage collection.
+// Disk pressure is induced by consuming a lot of inodes on the node.
+// Images are pre-pulled before running the test workload to ensure
+// that the image garbage collerctor can remove them to avoid eviction.
 var _ = SIGDescribe("ImageGCNoEviction", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), feature.Eviction, func() {
 	f := framework.NewDefaultFramework("image-gc-eviction-test")
 	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
@@ -113,13 +116,25 @@ var _ = SIGDescribe("ImageGCNoEviction", framework.WithSlow(), framework.WithSer
 	expectedStarvedResource := resourceInodes
 	inodesConsumed := uint64(100000)
 	ginkgo.Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() {
+		prepull := func(ctx context.Context) {
+			// Prepull images for image garbage collector to remove them
+			// when reclaiming resources
+			err := PrePullAllImages(ctx)
+			gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
+		}
+		ginkgo.BeforeEach(prepull)
+		if framework.TestContext.PrepullImages {
+			ginkgo.AfterEach(prepull)
+		}
+
 		tempSetCurrentKubeletConfig(f, func(ctx context.Context, initialConfig *kubeletconfig.KubeletConfiguration) {
 			// Set the eviction threshold to inodesFree - inodesConsumed, so that using inodesConsumed causes an eviction.
 			summary := eventuallyGetSummary(ctx)
 			inodesFree := *summary.Node.Fs.InodesFree
 			if inodesFree <= inodesConsumed {
 				e2eskipper.Skipf("Too few inodes free on the host for the InodeEviction test to run")
 			}
+			framework.Logf("Setting eviction threshold to %d inodes", inodesFree-inodesConsumed)
 			initialConfig.EvictionHard = map[string]string{string(evictionapi.SignalNodeFsInodesFree): fmt.Sprintf("%d", inodesFree-inodesConsumed)}
 			initialConfig.EvictionMinimumReclaim = map[string]string{}
 		})
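The threshold arithmetic in the hunk above is the core of the setup: the kubelet's hard-eviction threshold for the nodefs.inodesFree signal is set just below the node's current free-inode count, so consuming roughly inodesConsumed inodes during the test is enough to cross it. A minimal standalone sketch of that calculation (the helper name and the numbers are illustrative, not part of the test):

package main

import "fmt"

// inodeEvictionHard mirrors the logic above: with the hard threshold set to
// inodesFree-inodesConsumed, a workload that uses up inodesConsumed inodes
// drives the node below the nodefs.inodesFree signal and lets the eviction
// manager react.
func inodeEvictionHard(inodesFree, inodesConsumed uint64) map[string]string {
	return map[string]string{
		"nodefs.inodesFree": fmt.Sprintf("%d", inodesFree-inodesConsumed),
	}
}

func main() {
	// Illustrative numbers: 3.2M inodes free on the node, test consumes 100k.
	fmt.Println(inodeEvictionHard(3200000, 100000))
	// Output: map[nodefs.inodesFree:3100000]
}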
@@ -645,17 +660,6 @@ func runEvictionTest(f *framework.Framework, pressureTimeout time.Duration, expe
 		})
 
 		ginkgo.AfterEach(func(ctx context.Context) {
-			prePullImagesIfNeccecary := func() {
-				if expectedNodeCondition == v1.NodeDiskPressure && framework.TestContext.PrepullImages {
-					// The disk eviction test may cause the prepulled images to be evicted,
-					// prepull those images again to ensure this test not affect following tests.
-					err := PrePullAllImages(ctx)
-					gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-				}
-			}
-			// Run prePull using a defer to make sure it is executed even when the assertions below fails
-			defer prePullImagesIfNeccecary()
-
 			ginkgo.By("deleting pods")
 			for _, spec := range testSpecs {
 				ginkgo.By(fmt.Sprintf("deleting pod: %s", spec.pod.Name))
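The deleted block above was the shared cleanup path: every disk-pressure eviction test re-pulled images in runEvictionTest's AfterEach. After this change that responsibility lives only in the ImageGCNoEviction context (second hunk), which relies on Ginkgo's scoping rule that setup and teardown nodes registered inside a container apply only to the specs of that container. A self-contained sketch of that scoping (spec names and the prepull stand-in are hypothetical, not from this file):

package scoping_sketch_test

import (
	"context"
	"testing"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
)

func TestScopingSketch(t *testing.T) {
	gomega.RegisterFailHandler(ginkgo.Fail)
	ginkgo.RunSpecs(t, "prepull scoping sketch")
}

var _ = ginkgo.Describe("image-gc-like context", func() {
	prepull := func(ctx context.Context) {
		// stand-in for PrePullAllImages; runs only around specs in this container
	}
	ginkgo.BeforeEach(prepull)
	ginkgo.AfterEach(prepull)

	ginkgo.It("sees prepulled images", func() {})
})

var _ = ginkgo.Describe("an unrelated eviction context", func() {
	ginkgo.It("is not slowed down by the prepull above", func() {})
})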
@@ -673,17 +677,6 @@ func runEvictionTest(f *framework.Framework, pressureTimeout time.Duration, expe
 			}, pressureDisappearTimeout, evictionPollInterval).Should(gomega.BeNil())
 
 			reduceAllocatableMemoryUsageIfCgroupv1()
-			ginkgo.By("making sure we have all the required images for testing")
-			prePullImagesIfNeccecary()
-
-			// Ensure that the NodeCondition hasn't returned after pulling images
-			ginkgo.By(fmt.Sprintf("making sure NodeCondition %s doesn't exist again after pulling images", expectedNodeCondition))
-			gomega.Eventually(ctx, func(ctx context.Context) error {
-				if expectedNodeCondition != noPressure && hasNodeCondition(ctx, f, expectedNodeCondition) {
-					return fmt.Errorf("Conditions haven't returned to normal, node still has %s", expectedNodeCondition)
-				}
-				return nil
-			}, pressureDisappearTimeout, evictionPollInterval).Should(gomega.BeNil())
 
 			ginkgo.By("making sure we can start a new pod after the test")
 			podName := "test-admit-pod"
@@ -924,16 +917,25 @@ func logDiskMetrics(ctx context.Context) {
 	if summary.Node.Fs != nil && summary.Node.Fs.CapacityBytes != nil && summary.Node.Fs.AvailableBytes != nil {
 		framework.Logf("rootFsInfo.CapacityBytes: %d, rootFsInfo.AvailableBytes: %d", *summary.Node.Fs.CapacityBytes, *summary.Node.Fs.AvailableBytes)
 	}
+	if summary.Node.Fs != nil && summary.Node.Fs.Inodes != nil && summary.Node.Fs.InodesUsed != nil && summary.Node.Fs.InodesFree != nil {
+		framework.Logf("rootFsInfo.Inodes: %d, rootFsInfo.InodesUsed: %d, rootFsInfo.InodesFree: %d", *summary.Node.Fs.Inodes, *summary.Node.Fs.InodesUsed, *summary.Node.Fs.InodesFree)
+	}
 	for _, pod := range summary.Pods {
 		framework.Logf("Pod: %s", pod.PodRef.Name)
 		for _, container := range pod.Containers {
-			if container.Rootfs != nil && container.Rootfs.UsedBytes != nil {
-				framework.Logf("--- summary Container: %s UsedBytes: %d", container.Name, *container.Rootfs.UsedBytes)
+			if container.Rootfs != nil && container.Rootfs.UsedBytes != nil && container.Rootfs.AvailableBytes != nil {
+				framework.Logf("--- summary Container: %s UsedBytes: %d, AvailableBytes: %d", container.Name, *container.Rootfs.UsedBytes, *container.Rootfs.AvailableBytes)
+			}
+			if container.Rootfs != nil && container.Rootfs.Inodes != nil && container.Rootfs.InodesUsed != nil && container.Rootfs.InodesFree != nil {
+				framework.Logf("--- summary Container: %s Inodes: %d, InodesUsed: %d, InodesFree: %d", container.Name, *container.Rootfs.Inodes, *container.Rootfs.InodesUsed, *container.Rootfs.InodesFree)
 			}
 		}
 		for _, volume := range pod.VolumeStats {
-			if volume.FsStats.InodesUsed != nil {
-				framework.Logf("--- summary Volume: %s UsedBytes: %d", volume.Name, *volume.FsStats.UsedBytes)
+			if volume.FsStats.UsedBytes != nil && volume.FsStats.AvailableBytes != nil {
+				framework.Logf("--- summary Volume: %s UsedBytes: %d, AvailableBytest: %d", volume.Name, *volume.FsStats.UsedBytes, *volume.FsStats.AvailableBytes)
+			}
+			if volume.FsStats.Inodes != nil && volume.FsStats.InodesUsed != nil && volume.FsStats.InodesFree != nil {
+				framework.Logf("--- summary Volume: %s Inodes: %d, InodesUsed: %d, InodesFree: %d", volume.Name, *volume.FsStats.Inodes, *volume.FsStats.InodesUsed, *volume.FsStats.InodesFree)
 			}
 		}
 	}
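All of the logging added to logDiskMetrics follows the same defensive idiom: each filesystem stat in the kubelet stats summary is a pointer that may be nil when the runtime does not report it, so every field is checked before being dereferenced. A tiny illustration of that idiom in isolation (the helper is hypothetical, used only to make the pattern explicit):

package main

import "fmt"

// logStat prints a labeled *uint64 stat only when it is populated,
// mirroring the nil checks added in logDiskMetrics above.
func logStat(label string, v *uint64) {
	if v == nil {
		fmt.Printf("%s: <not reported>\n", label)
		return
	}
	fmt.Printf("%s: %d\n", label, *v)
}

func main() {
	inodesFree := uint64(3100000)
	logStat("rootFsInfo.InodesFree", &inodesFree) // rootFsInfo.InodesFree: 3100000
	logStat("rootFsInfo.InodesUsed", nil)         // rootFsInfo.InodesUsed: <not reported>
}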
