@@ -20,8 +20,10 @@ import (
 	"context"
 
 	"github.com/onsi/ginkgo/v2"
+	"github.com/onsi/gomega"
 
 	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/kubernetes/test/e2e/framework"
@@ -36,7 +38,7 @@ var _ = SIGDescribe("PodOSRejection", framework.WithNodeConformance(), func() {
 	f := framework.NewDefaultFramework("pod-os-rejection")
 	f.NamespacePodSecurityLevel = admissionapi.LevelBaseline
 	ginkgo.Context("Kubelet", func() {
-		ginkgo.It("should reject pod when the node OS doesn't match pod's OS", func(ctx context.Context) {
+		ginkgo.It("[LinuxOnly] should reject pod when the node OS doesn't match pod's OS", func(ctx context.Context) {
 			linuxNode, err := findLinuxNode(ctx, f)
 			framework.ExpectNoError(err)
 			pod := &v1.Pod{
@@ -65,6 +67,83 @@ var _ = SIGDescribe("PodOSRejection", framework.WithNodeConformance(), func() {
 	})
 })
 
+var _ = SIGDescribe("PodRejectionStatus", func() {
+	f := framework.NewDefaultFramework("pod-rejection-status")
+	f.NamespacePodSecurityLevel = admissionapi.LevelBaseline
+	ginkgo.Context("Kubelet", func() {
+		ginkgo.It("should reject pod when the node didn't have enough resource", func(ctx context.Context) {
+			node, err := e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet)
+			framework.ExpectNoError(err, "Failed to get a ready schedulable node")
+
+			// Create a pod that requests more CPU than the node has
+			pod := &v1.Pod{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      "pod-out-of-cpu",
+					Namespace: f.Namespace.Name,
+				},
+				Spec: v1.PodSpec{
+					Containers: []v1.Container{
+						{
+							Name:  "pod-out-of-cpu",
+							Image: imageutils.GetPauseImageName(),
+							Resources: v1.ResourceRequirements{
+								Requests: v1.ResourceList{
+									v1.ResourceCPU: resource.MustParse("1000000000000"), // requests more CPU than any node has
+								},
+							},
+						},
+					},
+				},
+			}
+
+			pod = e2epod.NewPodClient(f).Create(ctx, pod)
+
+			// Wait for the scheduler to update the pod status
+			err = e2epod.WaitForPodNameUnschedulableInNamespace(ctx, f.ClientSet, pod.Name, pod.Namespace)
+			framework.ExpectNoError(err)
+
+			// Fetch the pod to get the latest status, which should be the last one observed by the scheduler
+			// before the kubelet rejects the pod
+			pod, err = f.ClientSet.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{})
+			framework.ExpectNoError(err)
+
+			// force assign the Pod to a node in order to get a rejection status later
+			binding := &v1.Binding{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      pod.Name,
+					Namespace: pod.Namespace,
+					UID:       pod.UID,
+				},
+				Target: v1.ObjectReference{
+					Kind: "Node",
+					Name: node.Name,
+				},
+			}
+			err = f.ClientSet.CoreV1().Pods(pod.Namespace).Bind(ctx, binding, metav1.CreateOptions{})
+			framework.ExpectNoError(err)
+
+			// wait until the kubelet has rejected the pod
+			err = e2epod.WaitForPodFailedReason(ctx, f.ClientSet, pod, "OutOfcpu", f.Timeouts.PodStartShort)
+			framework.ExpectNoError(err)
+
+			// fetch the rejected Pod and compare the status
+			gotPod, err := f.ClientSet.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{})
+			framework.ExpectNoError(err)
+
+			// This detects whether any new fields in Status were dropped by the pod rejection.
+			// Such fields should either be preserved by the kubelet's admission or explicitly added below to the list of fields that are expected to differ or be cleared.
+			expectedStatus := pod.Status.DeepCopy()
+			expectedStatus.Phase = gotPod.Status.Phase
+			expectedStatus.Conditions = nil
+			expectedStatus.Message = gotPod.Status.Message
+			expectedStatus.Reason = gotPod.Status.Reason
+			expectedStatus.StartTime = gotPod.Status.StartTime
+			// expectedStatus.QOSClass is kept as is
+			gomega.Expect(gotPod.Status).To(gomega.Equal(*expectedStatus))
+		})
+	})
+})
+
 // findLinuxNode finds a Linux node that is Ready and Schedulable
 func findLinuxNode(ctx context.Context, f *framework.Framework) (v1.Node, error) {
 	selector := labels.Set{"kubernetes.io/os": "linux"}.AsSelector()
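The key step in the new test above is the manual Bind call, which bypasses the scheduler so that the kubelet's admission path is what produces the rejection status being compared. As a standalone illustration only (not part of the change), here is a minimal client-go sketch of that force-bind step; the kubeconfig path, namespace, pod name, and node name are assumed placeholders.

package main

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Build a clientset from a local kubeconfig (placeholder path).
	config, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}

	ctx := context.Background()

	// Look up the pending pod so the Binding can carry its UID (placeholder namespace/name).
	pod, err := clientset.CoreV1().Pods("default").Get(ctx, "pod-out-of-cpu", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}

	// A Binding names the pod and its target node; posting it to the
	// pods/binding subresource assigns the pod without the scheduler.
	binding := &v1.Binding{
		ObjectMeta: metav1.ObjectMeta{
			Name:      pod.Name,
			Namespace: pod.Namespace,
			UID:       pod.UID,
		},
		Target: v1.ObjectReference{
			Kind: "Node",
			Name: "some-node", // placeholder node name
		},
	}
	if err := clientset.CoreV1().Pods(pod.Namespace).Bind(ctx, binding, metav1.CreateOptions{}); err != nil {
		panic(err)
	}
	fmt.Println("pod bound; the kubelet's admission will now accept or reject it")
}

Binding sets the pod's node assignment directly, which is what lets the test observe the kubelet's OutOfcpu rejection without any scheduler involvement.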