Skip to content

Commit 5edf15e

Browse files
author
Kenichi Omichi
committed
Use worker nodes for WaitForStableCluster()
WaitForStableCluster() checks that all pods run on worker nodes, and the function used to refer to master nodes to skip checking control plane pods. GetMasterAndWorkerNodes() was used for getting master nodes, but the implementation is not good because it uses DeprecatedMightBeMasterNode(). This change makes WaitForStableCluster() refer to worker nodes directly to avoid using GetMasterAndWorkerNodes().
1 parent e2d8f6c commit 5edf15e

File tree

6 files changed

+27
-26
lines changed

6 files changed

+27
-26
lines changed

test/e2e/framework/node/resource.go

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -363,8 +363,9 @@ func GetReadyNodesIncludingTainted(c clientset.Interface) (nodes *v1.NodeList, e
363363
return nodes, nil
364364
}
365365

366-
// GetMasterAndWorkerNodes will return a list masters and schedulable worker nodes
367-
func GetMasterAndWorkerNodes(c clientset.Interface) (sets.String, *v1.NodeList, error) {
366+
// DeprecatedGetMasterAndWorkerNodes will return a list masters and schedulable worker nodes
367+
// NOTE: This function has been deprecated because of calling DeprecatedMightBeMasterNode().
368+
func DeprecatedGetMasterAndWorkerNodes(c clientset.Interface) (sets.String, *v1.NodeList, error) {
368369
nodes := &v1.NodeList{}
369370
masters := sets.NewString()
370371
all, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})

test/e2e/scheduling/framework.go

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -41,10 +41,10 @@ func SIGDescribe(text string, body func()) bool {
4141
}
4242

4343
// WaitForStableCluster waits until all existing pods are scheduled and returns their amount.
44-
func WaitForStableCluster(c clientset.Interface, masterNodes sets.String) int {
44+
func WaitForStableCluster(c clientset.Interface, workerNodes sets.String) int {
4545
startTime := time.Now()
4646
// Wait for all pods to be scheduled.
47-
allScheduledPods, allNotScheduledPods := getScheduledAndUnscheduledPods(c, masterNodes, metav1.NamespaceAll)
47+
allScheduledPods, allNotScheduledPods := getScheduledAndUnscheduledPods(c, workerNodes, metav1.NamespaceAll)
4848
for len(allNotScheduledPods) != 0 {
4949
time.Sleep(waitTime)
5050
if startTime.Add(timeout).Before(time.Now()) {
@@ -55,7 +55,7 @@ func WaitForStableCluster(c clientset.Interface, masterNodes sets.String) int {
5555
framework.Failf("Timed out after %v waiting for stable cluster.", timeout)
5656
break
5757
}
58-
allScheduledPods, allNotScheduledPods = getScheduledAndUnscheduledPods(c, masterNodes, metav1.NamespaceAll)
58+
allScheduledPods, allNotScheduledPods = getScheduledAndUnscheduledPods(c, workerNodes, metav1.NamespaceAll)
5959
}
6060
return len(allScheduledPods)
6161
}
@@ -79,7 +79,7 @@ func WaitForPodsToBeDeleted(c clientset.Interface) {
7979
}
8080

8181
// getScheduledAndUnscheduledPods lists scheduled and not scheduled pods in the given namespace, with succeeded and failed pods filtered out.
82-
func getScheduledAndUnscheduledPods(c clientset.Interface, masterNodes sets.String, ns string) (scheduledPods, notScheduledPods []v1.Pod) {
82+
func getScheduledAndUnscheduledPods(c clientset.Interface, workerNodes sets.String, ns string) (scheduledPods, notScheduledPods []v1.Pod) {
8383
pods, err := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{})
8484
framework.ExpectNoError(err, fmt.Sprintf("listing all pods in namespace %q while waiting for stable cluster", ns))
8585
// API server returns also Pods that succeeded. We need to filter them out.
@@ -90,7 +90,7 @@ func getScheduledAndUnscheduledPods(c clientset.Interface, masterNodes sets.Stri
9090
}
9191
}
9292
pods.Items = filteredPods
93-
return GetPodsScheduled(masterNodes, pods)
93+
return GetPodsScheduled(workerNodes, pods)
9494
}
9595

9696
// getDeletingPods returns whether there are any pods marked for deletion.

test/e2e/scheduling/predicates.go

Lines changed: 13 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -53,7 +53,7 @@ const (
5353
var localStorageVersion = utilversion.MustParseSemantic("v1.8.0-beta.0")
5454

5555
// variable set in BeforeEach, never modified afterwards
56-
var masterNodes sets.String
56+
var workerNodes sets.String
5757

5858
type pausePodConfig struct {
5959
Name string
@@ -95,17 +95,14 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
9595

9696
framework.AllNodesReady(cs, time.Minute)
9797

98-
// NOTE: Here doesn't get nodeList for supporting a master nodes which can host workload pods.
99-
masterNodes, _, err = e2enode.GetMasterAndWorkerNodes(cs)
100-
if err != nil {
101-
framework.Logf("Unexpected error occurred: %v", err)
102-
}
10398
nodeList, err = e2enode.GetReadySchedulableNodes(cs)
10499
if err != nil {
105100
framework.Logf("Unexpected error occurred: %v", err)
106101
}
107-
108102
framework.ExpectNoErrorWithOffset(0, err)
103+
for _, n := range nodeList.Items {
104+
workerNodes.Insert(n.Name)
105+
}
109106

110107
err = framework.CheckTestingNSDeletedExcept(cs, ns)
111108
framework.ExpectNoError(err)
@@ -135,7 +132,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
135132
nodeMaxAllocatable = allocatable.Value()
136133
}
137134
}
138-
WaitForStableCluster(cs, masterNodes)
135+
WaitForStableCluster(cs, workerNodes)
139136

140137
pods, err := cs.CoreV1().Pods(metav1.NamespaceAll).List(context.TODO(), metav1.ListOptions{})
141138
framework.ExpectNoError(err)
@@ -215,7 +212,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
215212
var beardsecond v1.ResourceName = "example.com/beardsecond"
216213

217214
ginkgo.BeforeEach(func() {
218-
WaitForStableCluster(cs, masterNodes)
215+
WaitForStableCluster(cs, workerNodes)
219216
ginkgo.By("Add RuntimeClass and fake resource")
220217

221218
// find a node which can run a pod:
@@ -323,7 +320,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
323320
Description: Scheduling Pods MUST fail if the resource requests exceed Machine capacity.
324321
*/
325322
framework.ConformanceIt("validates resource limits of pods that are allowed to run ", func() {
326-
WaitForStableCluster(cs, masterNodes)
323+
WaitForStableCluster(cs, workerNodes)
327324
nodeMaxAllocatable := int64(0)
328325
nodeToAllocatableMap := make(map[string]int64)
329326
for _, node := range nodeList.Items {
@@ -436,7 +433,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
436433
ginkgo.By("Trying to schedule Pod with nonempty NodeSelector.")
437434
podName := "restricted-pod"
438435

439-
WaitForStableCluster(cs, masterNodes)
436+
WaitForStableCluster(cs, workerNodes)
440437

441438
conf := pausePodConfig{
442439
Name: podName,
@@ -491,7 +488,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
491488
ginkgo.By("Trying to schedule Pod with nonempty NodeSelector.")
492489
podName := "restricted-pod"
493490

494-
WaitForStableCluster(cs, masterNodes)
491+
WaitForStableCluster(cs, workerNodes)
495492

496493
conf := pausePodConfig{
497494
Name: podName,
@@ -933,7 +930,7 @@ func WaitForSchedulerAfterAction(f *framework.Framework, action Action, ns, podN
933930
func verifyResult(c clientset.Interface, expectedScheduled int, expectedNotScheduled int, ns string) {
934931
allPods, err := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{})
935932
framework.ExpectNoError(err)
936-
scheduledPods, notScheduledPods := GetPodsScheduled(masterNodes, allPods)
933+
scheduledPods, notScheduledPods := GetPodsScheduled(workerNodes, allPods)
937934

938935
framework.ExpectEqual(len(notScheduledPods), expectedNotScheduled, fmt.Sprintf("Not scheduled Pods: %#v", notScheduledPods))
939936
framework.ExpectEqual(len(scheduledPods), expectedScheduled, fmt.Sprintf("Scheduled Pods: %#v", scheduledPods))
@@ -1045,10 +1042,10 @@ func translateIPv4ToIPv6(ip string) string {
10451042
return ip
10461043
}
10471044

1048-
// GetPodsScheduled returns a number of currently scheduled and not scheduled Pods.
1049-
func GetPodsScheduled(masterNodes sets.String, pods *v1.PodList) (scheduledPods, notScheduledPods []v1.Pod) {
1045+
// GetPodsScheduled returns a number of currently scheduled and not scheduled Pods on worker nodes.
1046+
func GetPodsScheduled(workerNodes sets.String, pods *v1.PodList) (scheduledPods, notScheduledPods []v1.Pod) {
10501047
for _, pod := range pods.Items {
1051-
if !masterNodes.Has(pod.Spec.NodeName) {
1048+
if workerNodes.Has(pod.Spec.NodeName) {
10521049
if pod.Spec.NodeName != "" {
10531050
_, scheduledCondition := podutil.GetPodCondition(&pod.Status, v1.PodScheduled)
10541051
framework.ExpectEqual(scheduledCondition != nil, true)

test/e2e/scheduling/preemption.go

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -97,11 +97,14 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
9797
}
9898

9999
e2enode.WaitForTotalHealthy(cs, time.Minute)
100-
masterNodes, nodeList, err = e2enode.GetMasterAndWorkerNodes(cs)
100+
nodeList, err = e2enode.GetReadySchedulableNodes(cs)
101101
if err != nil {
102102
framework.Logf("Unexpected error occurred: %v", err)
103103
}
104104
framework.ExpectNoErrorWithOffset(0, err)
105+
for _, n := range nodeList.Items {
106+
workerNodes.Insert(n.Name)
107+
}
105108

106109
err = framework.CheckTestingNSDeletedExcept(cs, ns)
107110
framework.ExpectNoError(err)

test/e2e/scheduling/priorities.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -144,7 +144,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
144144
var err error
145145

146146
e2enode.WaitForTotalHealthy(cs, time.Minute)
147-
_, nodeList, err = e2enode.GetMasterAndWorkerNodes(cs)
147+
nodeList, err = e2enode.GetReadySchedulableNodes(cs)
148148
if err != nil {
149149
framework.Logf("Unexpected error occurred: %v", err)
150150
}

test/e2e/storage/vsphere/vsphere_volume_vsan_policy.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -94,7 +94,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
9494
scParameters = make(map[string]string)
9595
_, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet)
9696
framework.ExpectNoError(err)
97-
masternodes, _, err := e2enode.GetMasterAndWorkerNodes(client)
97+
masternodes, _, err := e2enode.DeprecatedGetMasterAndWorkerNodes(client)
9898
framework.ExpectNoError(err)
9999
gomega.Expect(masternodes).NotTo(gomega.BeEmpty())
100100
masterNode = masternodes.List()[0]

0 commit comments

Comments
 (0)