Commit e8bc121

Merge pull request kubernetes#85916 from ahg-g/ahg-e2e
remove max pods from e2e test
2 parents 8f7f2dd + 8aa26a6

1 file changed: 0 additions, 41 deletions

test/e2e/scheduling/predicates.go

Lines changed: 0 additions & 41 deletions
@@ -70,7 +70,6 @@ type pausePodConfig struct {
 var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
 	var cs clientset.Interface
 	var nodeList *v1.NodeList
-	var totalPodCapacity int64
 	var RCName string
 	var ns string
 	f := framework.NewDefaultFramework("sched-pred")
@@ -115,46 +114,6 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
 
 	})
 
-	// This test verifies that the max-pods flag works as advertised. It assumes that cluster add-on pods stay stable
-	// and cannot be run in parallel with any other test that touches Nodes or Pods. This is because, to check
-	// that max-pods is working, we need to fully saturate the cluster and keep it in this state for a few seconds.
-	//
-	// Slow PR #13315 (8 min)
-	ginkgo.It("validates MaxPods limit number of pods that are allowed to run [Slow]", func() {
-		totalPodCapacity = 0
-
-		for _, node := range nodeList.Items {
-			framework.Logf("Node: %v", node)
-			podCapacity, found := node.Status.Capacity[v1.ResourcePods]
-			framework.ExpectEqual(found, true)
-			totalPodCapacity += podCapacity.Value()
-		}
-
-		WaitForPodsToBeDeleted(cs)
-		currentlyScheduledPods := WaitForStableCluster(cs, masterNodes)
-		podsNeededForSaturation := int(totalPodCapacity) - currentlyScheduledPods
-
-		ginkgo.By(fmt.Sprintf("Starting additional %v Pods to fully saturate the cluster max pods and trying to start another one", podsNeededForSaturation))
-
-		// As the pods are distributed randomly among nodes,
-		// it can easily happen that all nodes are saturated
-		// and there is no need to create additional pods.
-		// StartPods requires at least one pod to replicate.
-		if podsNeededForSaturation > 0 {
-			framework.ExpectNoError(testutils.StartPods(cs, podsNeededForSaturation, ns, "maxp",
-				*initPausePod(f, pausePodConfig{
-					Name:   "",
-					Labels: map[string]string{"name": ""},
-				}), true, framework.Logf))
-		}
-		podName := "additional-pod"
-		WaitForSchedulerAfterAction(f, createPausePodAction(f, pausePodConfig{
-			Name:   podName,
-			Labels: map[string]string{"name": "additional"},
-		}), ns, podName, false)
-		verifyResult(cs, podsNeededForSaturation, 1, ns)
-	})
-
 	// This test verifies we don't allow scheduling of pods in a way that the sum of the local ephemeral storage limits of pods is greater than the machine's capacity.
 	// It assumes that cluster add-on pods stay stable and cannot be run in parallel with any other test that touches Nodes or Pods.
 	// It is so because we need to have precise control over what's running in the cluster.
