@@ -103,8 +103,11 @@ var _ = SIGDescribe("InodeEviction", framework.WithSlow(), framework.WithSerial(
 	})
 })
 
-// ImageGCNoEviction tests that the node does not evict pods when inodes are consumed by images
-// Disk pressure is induced by pulling large images
+// ImageGCNoEviction tests that the eviction manager is able to prevent eviction
+// by reclaiming resources (inodes) through image garbage collection.
+// Disk pressure is induced by consuming a lot of inodes on the node.
+// Images are pre-pulled before running the test workload to ensure
+// that the image garbage collector can remove them to avoid eviction.
 var _ = SIGDescribe("ImageGCNoEviction", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), feature.Eviction, func() {
 	f := framework.NewDefaultFramework("image-gc-eviction-test")
 	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
@@ -113,6 +116,17 @@ var _ = SIGDescribe("ImageGCNoEviction", framework.WithSlow(), framework.WithSer
 	expectedStarvedResource := resourceInodes
 	inodesConsumed := uint64(100000)
 	ginkgo.Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() {
+		prepull := func(ctx context.Context) {
+			// Prepull images so the image garbage collector has something
+			// to remove when reclaiming resources.
+			err := PrePullAllImages(ctx)
+			gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
+		}
+		ginkgo.BeforeEach(prepull)
+		if framework.TestContext.PrepullImages {
+			ginkgo.AfterEach(prepull)
+		}
+
 		tempSetCurrentKubeletConfig(f, func(ctx context.Context, initialConfig *kubeletconfig.KubeletConfiguration) {
 			// Set the eviction threshold to inodesFree - inodesConsumed, so that using inodesConsumed causes an eviction.
 			summary := eventuallyGetSummary(ctx)
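For readers following the kubelet-config hunk above: the existing comment spells out the intent, namely that the hard eviction threshold is set to inodesFree - inodesConsumed, so consuming inodesConsumed inodes is enough to trip the nodefs.inodesFree signal. The standalone sketch below only illustrates that arithmetic; the helper name and the main function are invented for the example, and in the real test the resulting map would feed the kubelet configuration's hard-eviction thresholds rather than be printed.

package main

import "fmt"

// evictionHardForInodes mirrors the arithmetic from the comment above: with the
// hard threshold set to inodesFree-inodesConsumed, the nodefs.inodesFree signal
// fires once the workload has consumed roughly inodesConsumed inodes.
func evictionHardForInodes(inodesFree, inodesConsumed uint64) map[string]string {
	return map[string]string{
		"nodefs.inodesFree": fmt.Sprintf("%d", inodesFree-inodesConsumed),
	}
}

func main() {
	// Example: 2,000,000 free inodes observed, the test will consume 100,000.
	fmt.Println(evictionHardForInodes(2_000_000, 100_000))
	// Output: map[nodefs.inodesFree:1900000]
}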
@@ -646,17 +660,6 @@ func runEvictionTest(f *framework.Framework, pressureTimeout time.Duration, expe
 		})
 
 		ginkgo.AfterEach(func(ctx context.Context) {
-			prePullImagesIfNeccecary := func() {
-				if expectedNodeCondition == v1.NodeDiskPressure && framework.TestContext.PrepullImages {
-					// The disk eviction test may cause the prepulled images to be evicted,
-					// prepull those images again to ensure this test not affect following tests.
-					err := PrePullAllImages(ctx)
-					gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-				}
-			}
-			// Run prePull using a defer to make sure it is executed even when the assertions below fails
-			defer prePullImagesIfNeccecary()
-
 			ginkgo.By("deleting pods")
 			for _, spec := range testSpecs {
 				ginkgo.By(fmt.Sprintf("deleting pod: %s", spec.pod.Name))
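The lines removed in this hunk are the defer-based re-pull that used to live inside the shared AfterEach; with this change the same responsibility moves to the BeforeEach/AfterEach hooks added earlier in the ImageGCNoEviction context. The sketch below shows that Ginkgo hook pattern in isolation; the prePullAllImages stub and the prepullImagesAfterEach constant stand in for the framework's PrePullAllImages helper and the framework.TestContext.PrepullImages flag, so this is an illustration of the pattern, not the PR's actual code.

package evictionsketch

import (
	"context"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
)

// prePullAllImages stands in for the e2e framework's PrePullAllImages helper.
func prePullAllImages(ctx context.Context) error { return nil }

// prepullImagesAfterEach stands in for framework.TestContext.PrepullImages.
const prepullImagesAfterEach = true

var _ = ginkgo.Describe("image-gc eviction (sketch)", func() {
	prepull := func(ctx context.Context) {
		// Make sure the images the garbage collector is expected to reclaim
		// are present before (and, optionally, after) each spec.
		err := prePullAllImages(ctx)
		gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
	}
	ginkgo.BeforeEach(prepull)
	if prepullImagesAfterEach {
		// Re-pull after the spec so later tests still find their images,
		// even if disk pressure caused them to be garbage collected.
		ginkgo.AfterEach(prepull)
	}

	ginkgo.It("keeps required images available", func(ctx context.Context) {
		// test body elided
	})
})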
@@ -674,17 +677,6 @@ func runEvictionTest(f *framework.Framework, pressureTimeout time.Duration, expe
 			}, pressureDisappearTimeout, evictionPollInterval).Should(gomega.BeNil())
 
 			reduceAllocatableMemoryUsageIfCgroupv1()
-			ginkgo.By("making sure we have all the required images for testing")
-			prePullImagesIfNeccecary()
-
-			// Ensure that the NodeCondition hasn't returned after pulling images
-			ginkgo.By(fmt.Sprintf("making sure NodeCondition %s doesn't exist again after pulling images", expectedNodeCondition))
-			gomega.Eventually(ctx, func(ctx context.Context) error {
-				if expectedNodeCondition != noPressure && hasNodeCondition(ctx, f, expectedNodeCondition) {
-					return fmt.Errorf("Conditions haven't returned to normal, node still has %s", expectedNodeCondition)
-				}
-				return nil
-			}, pressureDisappearTimeout, evictionPollInterval).Should(gomega.BeNil())
 
 			ginkgo.By("making sure we can start a new pod after the test")
 			podName := "test-admit-pod"
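The diff cuts off right where the AfterEach verifies recovery by admitting a fresh test-admit-pod. Below is a hedged sketch of what such a post-test admission check can look like; it assumes the e2e framework's pod client helpers (e2epod.NewPodClient, CreateSync) and the pause-image helper behave as in current Kubernetes test code, and it is not the file's actual implementation.

package evictionsketch

import (
	"context"

	"github.com/onsi/ginkgo/v2"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	imageutils "k8s.io/kubernetes/test/utils/image"
)

// verifyNodeAdmitsPod creates a trivial pause-container pod and waits for it to
// run, confirming the node accepts new workloads once pressure has cleared.
func verifyNodeAdmitsPod(ctx context.Context, f *framework.Framework) {
	ginkgo.By("making sure we can start a new pod after the test")
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "test-admit-pod"},
		Spec: v1.PodSpec{
			RestartPolicy: v1.RestartPolicyNever,
			Containers: []v1.Container{{
				Name:  "test-admit-container",
				Image: imageutils.GetPauseImageName(),
			}},
		},
	}
	// CreateSync blocks until the pod reaches Running (failing the spec otherwise).
	e2epod.NewPodClient(f).CreateSync(ctx, pod)
}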