
Commit 3818f64

Refactor the node perf tests to run on non-amd64 cluster
1 parent c90bf8d commit 3818f64

4 files changed: 69 additions, 48 deletions

test/e2e_node/image_list.go

Lines changed: 2 additions & 2 deletions

@@ -53,8 +53,8 @@ var NodeImageWhiteList = sets.NewString(
 	imageutils.GetE2EImage(imageutils.Nonewprivs),
 	imageutils.GetPauseImageName(),
 	framework.GetGPUDevicePluginImage(),
-	"gcr.io/kubernetes-e2e-test-images/node-perf/npb-is-amd64:1.0",
-	"gcr.io/kubernetes-e2e-test-images/node-perf/npb-ep-amd64:1.0",
+	"gcr.io/kubernetes-e2e-test-images/node-perf/npb-is:1.0",
+	"gcr.io/kubernetes-e2e-test-images/node-perf/npb-ep:1.0",
 	"gcr.io/kubernetes-e2e-test-images/node-perf/tf-wide-deep-amd64:1.0",
 )

test/e2e_node/node_perf_test.go

Lines changed: 65 additions & 44 deletions

@@ -56,53 +56,74 @@ func setKubeletConfig(f *framework.Framework, cfg *kubeletconfig.KubeletConfigur
 // Slow by design.
 var _ = SIGDescribe("Node Performance Testing [Serial] [Slow]", func() {
 	f := framework.NewDefaultFramework("node-performance-testing")
+	var (
+		wl     workloads.NodePerfWorkload
+		oldCfg *kubeletconfig.KubeletConfiguration
+		newCfg *kubeletconfig.KubeletConfiguration
+		pod    *corev1.Pod
+	)
+	JustBeforeEach(func() {
+		err := wl.PreTestExec()
+		framework.ExpectNoError(err)
+		oldCfg, err = getCurrentKubeletConfig()
+		framework.ExpectNoError(err)
+		newCfg, err = wl.KubeletConfig(oldCfg)
+		framework.ExpectNoError(err)
+		setKubeletConfig(f, newCfg)
+	})

-	Context("Run node performance testing with pre-defined workloads", func() {
-		It("run each pre-defined workload", func() {
-			By("running the workloads")
-			for _, workload := range workloads.NodePerfWorkloads {
-				By("running the pre test exec from the workload")
-				err := workload.PreTestExec()
-				framework.ExpectNoError(err)
-
-				By("restarting kubelet with required configuration")
-				// Get the Kubelet config required for this workload.
-				oldCfg, err := getCurrentKubeletConfig()
-				framework.ExpectNoError(err)
-
-				newCfg, err := workload.KubeletConfig(oldCfg)
-				framework.ExpectNoError(err)
-				// Set the Kubelet config required for this workload.
-				setKubeletConfig(f, newCfg)
-
-				By("running the workload and waiting for success")
-				// Make the pod for the workload.
-				pod := makeNodePerfPod(workload)
-
-				// Create the pod.
-				pod = f.PodClient().CreateSync(pod)
-				// Wait for pod success.
-				f.PodClient().WaitForSuccess(pod.Name, workload.Timeout())
-				podLogs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name)
-				framework.ExpectNoError(err)
-				perf, err := workload.ExtractPerformanceFromLogs(podLogs)
-				framework.ExpectNoError(err)
-				framework.Logf("Time to complete workload %s: %v", workload.Name(), perf)
-
-				// Delete the pod.
-				gp := int64(0)
-				delOpts := metav1.DeleteOptions{
-					GracePeriodSeconds: &gp,
-				}
-				f.PodClient().DeleteSync(pod.Name, &delOpts, framework.DefaultPodDeletionTimeout)
+	cleanup := func() {
+		gp := int64(0)
+		delOpts := metav1.DeleteOptions{
+			GracePeriodSeconds: &gp,
+		}
+		f.PodClient().DeleteSync(pod.Name, &delOpts, framework.DefaultPodDeletionTimeout)
+		By("running the post test exec from the workload")
+		err := wl.PostTestExec()
+		framework.ExpectNoError(err)
+		setKubeletConfig(f, oldCfg)
+	}

-				By("running the post test exec from the workload")
-				err = workload.PostTestExec()
-				framework.ExpectNoError(err)
+	runWorkload := func() {
+		By("running the workload and waiting for success")
+		// Make the pod for the workload.
+		pod = makeNodePerfPod(wl)
+		// Create the pod.
+		pod = f.PodClient().CreateSync(pod)
+		// Wait for pod success.
+		f.PodClient().WaitForSuccess(pod.Name, wl.Timeout())
+		podLogs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name)
+		framework.ExpectNoError(err)
+		perf, err := wl.ExtractPerformanceFromLogs(podLogs)
+		framework.ExpectNoError(err)
+		framework.Logf("Time to complete workload %s: %v", wl.Name(), perf)
+	}

-				// Set the Kubelet config back to the old one.
-				setKubeletConfig(f, oldCfg)
-			}
+	Context("Run node performance testing with pre-defined workloads", func() {
+		BeforeEach(func() {
+			wl = workloads.NodePerfWorkloads[0]
+		})
+		It("NAS parallel benchmark (NPB) suite - Integer Sort (IS) workload", func() {
+			defer cleanup()
+			runWorkload()
+		})
+	})
+	Context("Run node performance testing with pre-defined workloads", func() {
+		BeforeEach(func() {
+			wl = workloads.NodePerfWorkloads[1]
+		})
+		It("NAS parallel benchmark (NPB) suite - Embarrassingly Parallel (EP) workload", func() {
+			defer cleanup()
+			runWorkload()
+		})
+	})
+	Context("Run node performance testing with pre-defined workloads", func() {
+		BeforeEach(func() {
+			wl = workloads.NodePerfWorkloads[2]
+		})
+		It("TensorFlow workload", func() {
+			defer cleanup()
+			runWorkload()
 		})
 	})
 })

test/e2e_node/perf/workloads/npb_ep.go

Lines changed: 1 addition & 1 deletion

@@ -43,7 +43,7 @@ func (w npbEPWorkload) PodSpec() corev1.PodSpec {
 	var containers []corev1.Container
 	ctn := corev1.Container{
 		Name:  fmt.Sprintf("%s-ctn", w.Name()),
-		Image: "gcr.io/kubernetes-e2e-test-images/node-perf/npb-ep-amd64:1.0",
+		Image: "gcr.io/kubernetes-e2e-test-images/node-perf/npb-ep:1.0",
 		Resources: corev1.ResourceRequirements{
 			Requests: corev1.ResourceList{
 				corev1.ResourceName(corev1.ResourceCPU): resource.MustParse("15000m"),

test/e2e_node/perf/workloads/npb_is.go

Lines changed: 1 addition & 1 deletion

@@ -41,7 +41,7 @@ func (w npbISWorkload) PodSpec() corev1.PodSpec {
 	var containers []corev1.Container
 	ctn := corev1.Container{
 		Name:  fmt.Sprintf("%s-ctn", w.Name()),
-		Image: "gcr.io/kubernetes-e2e-test-images/node-perf/npb-is-amd64:1.0",
+		Image: "gcr.io/kubernetes-e2e-test-images/node-perf/npb-is:1.0",
 		Resources: corev1.ResourceRequirements{
 			Requests: corev1.ResourceList{
 				corev1.ResourceName(corev1.ResourceCPU): resource.MustParse("16000m"),
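Each workload's PodSpec (as in the npb-ep and npb-is files above) is consumed by the makeNodePerfPod helper called in node_perf_test.go, which this commit does not touch. A minimal sketch of what such a helper can look like, assuming it only wraps the workload's PodSpec in a pod with a generated name; the package name and import paths are assumptions, not taken from this diff.

package e2e_node

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"k8s.io/kubernetes/test/e2e_node/perf/workloads"
)

// makeNodePerfPod (sketch): build the workload pod from the spec the workload
// itself provides, so the same create/wait/collect-logs flow works for every
// workload registered in workloads.NodePerfWorkloads.
func makeNodePerfPod(w workloads.NodePerfWorkload) *corev1.Pod {
	return &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: fmt.Sprintf("%s-pod", w.Name()),
		},
		Spec: w.PodSpec(),
	}
}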
