
Commit 37a5201

Merge pull request kubernetes#86109 from haosdent/clean-e2e-framework-job
e2e: move funcs of framework/job to e2e/upgrades/apps/job
2 parents: d93999b + 21e906f
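
In short: EnsureAllJobPodsRunning is deleted from the shared test/e2e/framework/job package and re-added as the unexported ensureAllJobPodsRunning in test/e2e/upgrades/apps/job.go, which was its only caller, and that package's BUILD file picks up a direct client-go dependency so the relocated helper can list pods itself.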

3 files changed: 33 additions (+), 29 deletions (-)

test/e2e/framework/job/wait.go
Lines changed: 0 additions & 27 deletions

@@ -17,15 +17,12 @@ limitations under the License.
 package job
 
 import (
-	"fmt"
-	"strings"
 	"time"
 
 	batchv1 "k8s.io/api/batch/v1"
 	"k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/util/wait"
 	clientset "k8s.io/client-go/kubernetes"
 	jobutil "k8s.io/kubernetes/pkg/controller/job"
@@ -101,30 +98,6 @@ func WaitForJobGone(c clientset.Interface, ns, jobName string, timeout time.Duration
 	})
 }
 
-// EnsureAllJobPodsRunning uses c to check in the Job named jobName in ns
-// is running, returning an error if the expected parallelism is not
-// satisfied.
-func EnsureAllJobPodsRunning(c clientset.Interface, ns, jobName string, parallelism int32) error {
-	label := labels.SelectorFromSet(labels.Set(map[string]string{JobSelectorKey: jobName}))
-	options := metav1.ListOptions{LabelSelector: label.String()}
-	pods, err := c.CoreV1().Pods(ns).List(options)
-	if err != nil {
-		return err
-	}
-	podsSummary := make([]string, 0, parallelism)
-	count := int32(0)
-	for _, p := range pods.Items {
-		if p.Status.Phase == v1.PodRunning {
-			count++
-		}
-		podsSummary = append(podsSummary, fmt.Sprintf("%s (%s: %s)", p.ObjectMeta.Name, p.Status.Phase, p.Status.Message))
-	}
-	if count != parallelism {
-		return fmt.Errorf("job has %d of %d expected running pods: %s", count, parallelism, strings.Join(podsSummary, ", "))
-	}
-	return nil
-}
-
 // WaitForAllJobPodsGone waits for all pods for the Job named jobName in namespace ns
 // to be deleted.
 func WaitForAllJobPodsGone(c clientset.Interface, ns, jobName string) error {
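
With EnsureAllJobPodsRunning gone, framework/job no longer references fmt, strings, or apimachinery's labels package, which is why those imports are dropped in the same file.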

test/e2e/upgrades/apps/BUILD
Lines changed: 1 addition & 0 deletions

@@ -26,6 +26,7 @@ go_library(
         "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
+        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
         "//test/e2e/framework:go_default_library",
         "//test/e2e/framework/deployment:go_default_library",
         "//test/e2e/framework/job:go_default_library",

test/e2e/upgrades/apps/job.go
Lines changed: 32 additions & 2 deletions

@@ -17,8 +17,14 @@ limitations under the License.
 package upgrades
 
 import (
+	"fmt"
+	"strings"
+
 	batchv1 "k8s.io/api/batch/v1"
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
+	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
 	jobutil "k8s.io/kubernetes/test/e2e/framework/job"
 	"k8s.io/kubernetes/test/e2e/upgrades"
@@ -54,11 +60,35 @@ func (t *JobUpgradeTest) Setup(f *framework.Framework) {
 func (t *JobUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) {
 	<-done
 	ginkgo.By("Ensuring active pods == parallelism")
-	err := jobutil.EnsureAllJobPodsRunning(f.ClientSet, t.namespace, t.job.Name, 2)
+	err := ensureAllJobPodsRunning(f.ClientSet, t.namespace, t.job.Name, 2)
 	framework.ExpectNoError(err)
 }
 
 // Teardown cleans up any remaining resources.
 func (t *JobUpgradeTest) Teardown(f *framework.Framework) {
 	// rely on the namespace deletion to clean up everything
 }
+
+// ensureAllJobPodsRunning uses c to check in the Job named jobName in ns
+// is running, returning an error if the expected parallelism is not
+// satisfied.
+func ensureAllJobPodsRunning(c clientset.Interface, ns, jobName string, parallelism int32) error {
+	label := labels.SelectorFromSet(labels.Set(map[string]string{jobutil.JobSelectorKey: jobName}))
+	options := metav1.ListOptions{LabelSelector: label.String()}
+	pods, err := c.CoreV1().Pods(ns).List(options)
+	if err != nil {
+		return err
+	}
+	podsSummary := make([]string, 0, parallelism)
+	count := int32(0)
+	for _, p := range pods.Items {
+		if p.Status.Phase == v1.PodRunning {
+			count++
+		}
+		podsSummary = append(podsSummary, fmt.Sprintf("%s (%s: %s)", p.ObjectMeta.Name, p.Status.Phase, p.Status.Message))
+	}
+	if count != parallelism {
+		return fmt.Errorf("job has %d of %d expected running pods: %s", count, parallelism, strings.Join(podsSummary, ", "))
+	}
+	return nil
+}
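
For readers who want to see the moved check in isolation, below is a minimal, self-contained sketch of what ensureAllJobPodsRunning does (select the Job's pods by label, count the ones in phase Running, compare against the expected parallelism), exercised against client-go's fake clientset. The countRunningJobPods helper, the "job" selector key, and the "demo"/"foo" names are illustrative only, not part of this commit, and the context-free List signature matches the client-go vendored at this point in the tree (newer client-go versions add a context.Context argument).

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/fake"
)

// countRunningJobPods lists the pods selected by selectorKey=jobName in ns and
// counts how many are in the Running phase, mirroring the check the relocated
// e2e helper performs before comparing against the Job's parallelism.
func countRunningJobPods(c clientset.Interface, ns, jobName, selectorKey string) (int32, error) {
	sel := labels.SelectorFromSet(labels.Set{selectorKey: jobName})
	pods, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{LabelSelector: sel.String()})
	if err != nil {
		return 0, err
	}
	running := int32(0)
	for _, p := range pods.Items {
		if p.Status.Phase == v1.PodRunning {
			running++
		}
	}
	return running, nil
}

func main() {
	// Seed a fake clientset with two pods carrying the illustrative "job" label.
	c := fake.NewSimpleClientset(
		&v1.Pod{
			ObjectMeta: metav1.ObjectMeta{Name: "foo-1", Namespace: "demo", Labels: map[string]string{"job": "foo"}},
			Status:     v1.PodStatus{Phase: v1.PodRunning},
		},
		&v1.Pod{
			ObjectMeta: metav1.ObjectMeta{Name: "foo-2", Namespace: "demo", Labels: map[string]string{"job": "foo"}},
			Status:     v1.PodStatus{Phase: v1.PodPending},
		},
	)

	running, err := countRunningJobPods(c, "demo", "foo", "job")
	fmt.Println(running, err) // prints: 1 <nil>
}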
