@@ -103,8 +103,11 @@ var _ = SIGDescribe("InodeEviction", framework.WithSlow(), framework.WithSerial(
})
})

- // ImageGCNoEviction tests that the node does not evict pods when inodes are consumed by images
- // Disk pressure is induced by pulling large images
+ // ImageGCNoEviction tests that the eviction manager is able to prevent eviction
+ // by reclaiming resources (inodes) through image garbage collection.
+ // Disk pressure is induced by consuming a lot of inodes on the node.
+ // Images are pre-pulled before running the test workload to ensure
+ // that the image garbage collector can remove them to avoid eviction.
var _ = SIGDescribe("ImageGCNoEviction", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), feature.Eviction, func() {
f := framework.NewDefaultFramework("image-gc-eviction-test")
f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
@@ -113,13 +116,25 @@ var _ = SIGDescribe("ImageGCNoEviction", framework.WithSlow(), framework.WithSer
expectedStarvedResource := resourceInodes
inodesConsumed := uint64(100000)
ginkgo.Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() {
+ prepull := func(ctx context.Context) {
+ // Prepull images for image garbage collector to remove them
+ // when reclaiming resources
+ err := PrePullAllImages(ctx)
+ gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
+ }
+ ginkgo.BeforeEach(prepull)
+ if framework.TestContext.PrepullImages {
+ ginkgo.AfterEach(prepull)
+ }
+
tempSetCurrentKubeletConfig(f, func(ctx context.Context, initialConfig *kubeletconfig.KubeletConfiguration) {
// Set the eviction threshold to inodesFree - inodesConsumed, so that using inodesConsumed causes an eviction.
summary := eventuallyGetSummary(ctx)
inodesFree := *summary.Node.Fs.InodesFree
if inodesFree <= inodesConsumed {
e2eskipper.Skipf("Too few inodes free on the host for the InodeEviction test to run")
}
+ framework.Logf("Setting eviction threshold to %d inodes", inodesFree-inodesConsumed)
initialConfig.EvictionHard = map[string]string{string(evictionapi.SignalNodeFsInodesFree): fmt.Sprintf("%d", inodesFree-inodesConsumed)}
initialConfig.EvictionMinimumReclaim = map[string]string{}
})
@@ -645,17 +660,6 @@ func runEvictionTest(f *framework.Framework, pressureTimeout time.Duration, expe
})

ginkgo.AfterEach(func(ctx context.Context) {
- prePullImagesIfNeccecary := func() {
- if expectedNodeCondition == v1.NodeDiskPressure && framework.TestContext.PrepullImages {
- // The disk eviction test may cause the prepulled images to be evicted,
- // prepull those images again to ensure this test not affect following tests.
- err := PrePullAllImages(ctx)
- gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
- }
- }
- // Run prePull using a defer to make sure it is executed even when the assertions below fails
- defer prePullImagesIfNeccecary()
-
ginkgo.By("deleting pods")
for _, spec := range testSpecs {
ginkgo.By(fmt.Sprintf("deleting pod: %s", spec.pod.Name))
@@ -673,17 +677,6 @@ func runEvictionTest(f *framework.Framework, pressureTimeout time.Duration, expe
}, pressureDisappearTimeout, evictionPollInterval).Should(gomega.BeNil())

reduceAllocatableMemoryUsageIfCgroupv1()
- ginkgo.By("making sure we have all the required images for testing")
- prePullImagesIfNeccecary()
-
- // Ensure that the NodeCondition hasn't returned after pulling images
- ginkgo.By(fmt.Sprintf("making sure NodeCondition %s doesn't exist again after pulling images", expectedNodeCondition))
- gomega.Eventually(ctx, func(ctx context.Context) error {
- if expectedNodeCondition != noPressure && hasNodeCondition(ctx, f, expectedNodeCondition) {
- return fmt.Errorf("Conditions haven't returned to normal, node still has %s", expectedNodeCondition)
- }
- return nil
- }, pressureDisappearTimeout, evictionPollInterval).Should(gomega.BeNil())

ginkgo.By("making sure we can start a new pod after the test")
podName := "test-admit-pod"
@@ -924,16 +917,25 @@ func logDiskMetrics(ctx context.Context) {
if summary.Node.Fs != nil && summary.Node.Fs.CapacityBytes != nil && summary.Node.Fs.AvailableBytes != nil {
framework.Logf("rootFsInfo.CapacityBytes: %d, rootFsInfo.AvailableBytes: %d", *summary.Node.Fs.CapacityBytes, *summary.Node.Fs.AvailableBytes)
}
+ if summary.Node.Fs != nil && summary.Node.Fs.Inodes != nil && summary.Node.Fs.InodesUsed != nil && summary.Node.Fs.InodesFree != nil {
+ framework.Logf("rootFsInfo.Inodes: %d, rootFsInfo.InodesUsed: %d, rootFsInfo.InodesFree: %d", *summary.Node.Fs.Inodes, *summary.Node.Fs.InodesUsed, *summary.Node.Fs.InodesFree)
+ }
for _, pod := range summary.Pods {
framework.Logf("Pod: %s", pod.PodRef.Name)
for _, container := range pod.Containers {
- if container.Rootfs != nil && container.Rootfs.UsedBytes != nil {
- framework.Logf("--- summary Container: %s UsedBytes: %d", container.Name, *container.Rootfs.UsedBytes)
+ if container.Rootfs != nil && container.Rootfs.UsedBytes != nil && container.Rootfs.AvailableBytes != nil {
+ framework.Logf("--- summary Container: %s UsedBytes: %d, AvailableBytes: %d", container.Name, *container.Rootfs.UsedBytes, *container.Rootfs.AvailableBytes)
+ }
+ if container.Rootfs != nil && container.Rootfs.Inodes != nil && container.Rootfs.InodesUsed != nil && container.Rootfs.InodesFree != nil {
+ framework.Logf("--- summary Container: %s Inodes: %d, InodesUsed: %d, InodesFree: %d", container.Name, *container.Rootfs.Inodes, *container.Rootfs.InodesUsed, *container.Rootfs.InodesFree)
}
}
for _, volume := range pod.VolumeStats {
- if volume.FsStats.InodesUsed != nil {
- framework.Logf("--- summary Volume: %s UsedBytes: %d", volume.Name, *volume.FsStats.UsedBytes)
+ if volume.FsStats.UsedBytes != nil && volume.FsStats.AvailableBytes != nil {
+ framework.Logf("--- summary Volume: %s UsedBytes: %d, AvailableBytes: %d", volume.Name, *volume.FsStats.UsedBytes, *volume.FsStats.AvailableBytes)
+ }
+ if volume.FsStats.Inodes != nil && volume.FsStats.InodesUsed != nil && volume.FsStats.InodesFree != nil {
+ framework.Logf("--- summary Volume: %s Inodes: %d, InodesUsed: %d, InodesFree: %d", volume.Name, *volume.FsStats.Inodes, *volume.FsStats.InodesUsed, *volume.FsStats.InodesFree)
}
}
}
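For reference, a minimal standalone sketch (not part of the diff) of the threshold arithmetic configured in the ImageGCNoEviction hunk above; the 3,000,000 free inodes figure is hypothetical, in the test it comes from eventuallyGetSummary(ctx):

package main

import "fmt"

// Illustrative only: shows how the hard eviction threshold is derived from the
// node summary, so that consuming ~inodesConsumed inodes induces DiskPressure
// and lets image garbage collection reclaim the pre-pulled images.
func main() {
	inodesFree := uint64(3000000)    // hypothetical value; read from the node's stats summary in the test
	inodesConsumed := uint64(100000) // matches the constant set in the test context
	// EvictionHard["nodefs.inodesFree"] = inodesFree - inodesConsumed
	fmt.Printf("nodefs.inodesFree: %d\n", inodesFree-inodesConsumed) // prints 2900000
}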