Commit 69e2bf9

Merge pull request #848 from yanirq/master
Allow more update time for netqueues tests
2 parents: 20493dd + 44fa490

File tree

1 file changed (+61, −72)

functests/1_performance/netqueues.go

Lines changed: 61 additions & 72 deletions
@@ -10,6 +10,7 @@ import (
 
 	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
 	"k8s.io/utils/pointer"
 	"sigs.k8s.io/controller-runtime/pkg/client"
@@ -51,6 +52,15 @@ var _ = Describe("[ref_id: 40307][pao]Resizing Network Queues", func() {
 		initialProfile = profile.DeepCopy()
 
 		performanceProfileName = profile.Name
+
+		tunedPaoProfile := fmt.Sprintf("openshift-node-performance-%s", performanceProfileName)
+		//Verify the tuned profile is created on the worker-cnf nodes:
+		tunedCmd := []string{"tuned-adm", "profile_info", tunedPaoProfile}
+		for _, node := range workerRTNodes {
+			tunedPod := nodes.TunedForNode(&node, RunningOnSingleNode)
+			_, err := pods.WaitForPodOutput(testclient.K8sClient, tunedPod, tunedCmd)
+			Expect(err).ToNot(HaveOccurred())
+		}
 	})
 
 	BeforeEach(func() {
@@ -84,58 +94,27 @@ var _ = Describe("[ref_id: 40307][pao]Resizing Network Queues", func() {
 	Context("Updating performance profile for netqueues", func() {
 		It("[test_id:40308][crit:high][vendor:[email protected]][level:acceptance] Network device queues Should be set to the profile's reserved CPUs count ", func() {
 			nodesDevices := make(map[string]map[string]int)
-			count := 0
 			if profile.Spec.Net != nil {
 				if profile.Spec.Net.UserLevelNetworking != nil && *profile.Spec.Net.UserLevelNetworking && len(profile.Spec.Net.Devices) == 0 {
 					By("To all non virtual network devices when no devices are specified under profile.Spec.Net.Devices")
-					err := checkDeviceSupport(workerRTNodes, nodesDevices)
-					Expect(err).ToNot(HaveOccurred())
-					for _, devices := range nodesDevices {
-						for _, size := range devices {
-							if size == getReservedCPUSize(profile.Spec.CPU) {
-								count++
-							}
-						}
-					}
-					if count == 0 {
+					err := checkDeviceSetWithReservedCPU(workerRTNodes, nodesDevices, *profile)
+					if err != nil {
 						Skip("Skipping Test: Unable to set Network queue size to reserved cpu count")
 					}
 				}
 			}
 		})
 
 		It("[test_id:40542] Verify the number of network queues of all supported network interfaces are equal to reserved cpus count", func() {
-			tunedPaoProfile := fmt.Sprintf("openshift-node-performance-%s", performanceProfileName)
 			nodesDevices := make(map[string]map[string]int)
-			count := 0
-			// Populate the device map with queue sizes
-			Eventually(func() bool {
-				err := checkDeviceSupport(workerRTNodes, nodesDevices)
-				Expect(err).ToNot(HaveOccurred())
-				return true
-			}, cluster.ComputeTestTimeout(200*time.Second, RunningOnSingleNode), testPollInterval*time.Second).Should(BeTrue())
-			//Verify the tuned profile is created on the worker-cnf nodes:
-			tunedCmd := []string{"tuned-adm", "profile_info", tunedPaoProfile}
-			for _, node := range workerRTNodes {
-				tunedPod := nodes.TunedForNode(&node, RunningOnSingleNode)
-				_, err := pods.WaitForPodOutput(testclient.K8sClient, tunedPod, tunedCmd)
-				Expect(err).ToNot(HaveOccurred())
-			}
-			for _, devices := range nodesDevices {
-				for _, size := range devices {
-					if size == getReservedCPUSize(profile.Spec.CPU) {
-						count++
-					}
-				}
-			}
-			if count == 0 {
+			err := checkDeviceSetWithReservedCPU(workerRTNodes, nodesDevices, *profile)
+			if err != nil {
 				Skip("Skipping Test: Unable to set Network queue size to reserved cpu count")
 			}
 		})
 
 		It("[test_id:40543] Add interfaceName and verify the interface netqueues are equal to reserved cpus count.", func() {
 			nodesDevices := make(map[string]map[string]int)
-			count := 0
 			err := checkDeviceSupport(workerRTNodes, nodesDevices)
 			Expect(err).ToNot(HaveOccurred())
 			nodeName, device := getRandomNodeDevice(nodesDevices)
@@ -161,27 +140,25 @@ var _ = Describe("[ref_id: 40307][pao]Resizing Network Queues", func() {
 			node, err := nodes.GetByName(nodeName)
 			Expect(err).ToNot(HaveOccurred())
 			tunedPod := nodes.TunedForNode(node, RunningOnSingleNode)
-			out, err := pods.WaitForPodOutput(testclient.K8sClient, tunedPod, tunedCmd)
-			deviceExists := strings.ContainsAny(string(out), device)
-			Expect(deviceExists).To(BeTrue())
-			Expect(err).ToNot(HaveOccurred())
 
-			for _, devices := range nodesDevices {
-				for _, size := range devices {
-					if size == getReservedCPUSize(profile.Spec.CPU) {
-						count++
-					}
+			Eventually(func() bool {
+				out, err := pods.WaitForPodOutput(testclient.K8sClient, tunedPod, tunedCmd)
+				if err != nil {
+					return false
 				}
-			}
-			if count == 0 {
+				return strings.ContainsAny(string(out), device)
+			}, cluster.ComputeTestTimeout(2*time.Minute, RunningOnSingleNode), 5*time.Second).Should(BeTrue(), "could not get a tuned profile set with devices_udev_regex")
+
+			nodesDevices = make(map[string]map[string]int)
+			err = checkDeviceSetWithReservedCPU(workerRTNodes, nodesDevices, *profile)
+			if err != nil {
 				Skip("Skipping Test: Unable to set Network queue size to reserved cpu count")
 			}
 		})
 
 		It("[test_id:40545] Verify reserved cpus count is applied to specific supported networking devices using wildcard matches", func() {
 			nodesDevices := make(map[string]map[string]int)
 			var device, devicePattern string
-			count := 0
 			err := checkDeviceSupport(workerRTNodes, nodesDevices)
 			Expect(err).ToNot(HaveOccurred())
 			nodeName, device := getRandomNodeDevice(nodesDevices)
@@ -207,26 +184,24 @@ var _ = Describe("[ref_id: 40307][pao]Resizing Network Queues", func() {
 			node, err := nodes.GetByName(nodeName)
 			Expect(err).ToNot(HaveOccurred())
 			tunedPod := nodes.TunedForNode(node, RunningOnSingleNode)
-			out, err := pods.WaitForPodOutput(testclient.K8sClient, tunedPod, tunedCmd)
-			deviceExists := strings.ContainsAny(string(out), device)
-			Expect(deviceExists).To(BeTrue())
-			Expect(err).ToNot(HaveOccurred())
 
-			for _, devices := range nodesDevices {
-				for _, size := range devices {
-					if size == getReservedCPUSize(profile.Spec.CPU) {
-						count++
-					}
+			Eventually(func() bool {
+				out, err := pods.WaitForPodOutput(testclient.K8sClient, tunedPod, tunedCmd)
+				if err != nil {
+					return false
 				}
-			}
-			if count == 0 {
+				return strings.ContainsAny(string(out), device)
+			}, cluster.ComputeTestTimeout(2*time.Minute, RunningOnSingleNode), 5*time.Second).Should(BeTrue(), "could not get a tuned profile set with devices_udev_regex")
+
+			nodesDevices = make(map[string]map[string]int)
+			err = checkDeviceSetWithReservedCPU(workerRTNodes, nodesDevices, *profile)
+			if err != nil {
 				Skip("Skipping Test: Unable to set Network queue size to reserved cpu count")
 			}
 		})
 
 		It("[test_id:40668] Verify reserved cpu count is added to networking devices matched with vendor and Device id", func() {
 			nodesDevices := make(map[string]map[string]int)
-			count := 0
 			err := checkDeviceSupport(workerRTNodes, nodesDevices)
 			Expect(err).ToNot(HaveOccurred())
 			nodeName, device := getRandomNodeDevice(nodesDevices)
@@ -259,26 +234,40 @@ var _ = Describe("[ref_id: 40307][pao]Resizing Network Queues", func() {
 			node, err = nodes.GetByName(nodeName)
 			Expect(err).ToNot(HaveOccurred())
 			tunedPod := nodes.TunedForNode(node, RunningOnSingleNode)
-			out, err := pods.WaitForPodOutput(testclient.K8sClient, tunedPod, tunedCmd)
-			deviceExists := strings.ContainsAny(string(out), device)
-			Expect(deviceExists).To(BeTrue())
-			Expect(err).ToNot(HaveOccurred())
-
-			for _, devices := range nodesDevices {
-				for _, size := range devices {
-					if size == getReservedCPUSize(profile.Spec.CPU) {
-						count++
-					}
+			Eventually(func() bool {
+				out, err := pods.WaitForPodOutput(testclient.K8sClient, tunedPod, tunedCmd)
+				if err != nil {
+					return false
 				}
-			}
-			if count == 0 {
+				return strings.ContainsAny(string(out), device)
+			}, cluster.ComputeTestTimeout(2*time.Minute, RunningOnSingleNode), 5*time.Second).Should(BeTrue(), "could not get a tuned profile set with devices_udev_regex")
+
+			nodesDevices = make(map[string]map[string]int)
+			err = checkDeviceSetWithReservedCPU(workerRTNodes, nodesDevices, *profile)
+			if err != nil {
 				Skip("Skipping Test: Unable to set Network queue size to reserved cpu count")
 			}
 		})
 	})
 })
 
-//Check if the device support multiple queues
+// Check a device that supports multiple queues and set with with reserved CPU size exists
+func checkDeviceSetWithReservedCPU(workerRTNodes []corev1.Node, nodesDevices map[string]map[string]int, profile performancev2.PerformanceProfile) error {
+	return wait.PollImmediate(5*time.Second, 90*time.Second, func() (bool, error) {
+		err := checkDeviceSupport(workerRTNodes, nodesDevices)
+		Expect(err).ToNot(HaveOccurred())
+		for _, devices := range nodesDevices {
+			for _, size := range devices {
+				if size == getReservedCPUSize(profile.Spec.CPU) {
+					return true, nil
+				}
+			}
+		}
+		return false, nil
+	})
+}
+
+// Check if the device support multiple queues
 func checkDeviceSupport(workernodes []corev1.Node, nodesDevices map[string]map[string]int) error {
 	cmdGetPhysicalDevices := []string{"find", "/sys/class/net", "-type", "l", "-not", "-lname", "*virtual*", "-printf", "%f "}
 	var channelCurrentCombined int

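The extra update time comes from the new checkDeviceSetWithReservedCPU helper, which retries the device check with wait.PollImmediate from k8s.io/apimachinery/pkg/util/wait. As a standalone sketch (not part of the commit), the snippet below shows the same retry pattern with the commit's 5-second interval and 90-second timeout; queueSizeMatchesReserved is a hypothetical stand-in for the real per-node queue check.

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// queueSizeMatchesReserved is a hypothetical stand-in for the real check that
// compares each device's queue count against the profile's reserved CPU count.
func queueSizeMatchesReserved() bool {
	// ... query the nodes and compare queue sizes here ...
	return true
}

func main() {
	// Poll every 5s for up to 90s, the same intervals checkDeviceSetWithReservedCPU
	// uses above. PollImmediate runs the condition once right away, then retries
	// until it returns true, returns an error, or the timeout expires.
	err := wait.PollImmediate(5*time.Second, 90*time.Second, func() (bool, error) {
		return queueSizeMatchesReserved(), nil
	})
	if err != nil {
		// wait.ErrWaitTimeout is returned when the condition never became true.
		fmt.Println("network queues were not resized in time:", err)
		return
	}
	fmt.Println("network queue sizes match the reserved CPU count")
}

If the queues never reach the reserved CPU count within the timeout, PollImmediate returns wait.ErrWaitTimeout, which the tests above translate into a Skip rather than a failure.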