@@ -120,7 +120,6 @@ var _ = SIGDescribe("Conntrack", func() {
		// TODO(#91236): Remove once the test is debugged and fixed.
		// dump conntrack table for debugging
		defer dumpConntrack(cs)
-		defer dumpIptables(cs)

		// Create a NodePort service
		udpJig := e2eservice.NewTestJig(cs, ns, serviceName)
@@ -140,34 +139,24 @@ var _ = SIGDescribe("Conntrack", func() {
		cmd := fmt.Sprintf(`date; for i in $(seq 1 3000); do echo "$(date) Try: ${i}"; echo hostname | nc -u -w 5 -p %d %s %d; echo; done`, srcPort, serverNodeInfo.nodeIP, udpService.Spec.Ports[0].NodePort)
		clientPod.Spec.Containers[0].Command = []string{"/bin/sh", "-c", cmd}
		clientPod.Spec.Containers[0].Name = podClient
-		pod := fr.PodClient().CreateSync(clientPod)
+		fr.PodClient().CreateSync(clientPod)

		// Read the client pod logs
		logs, err := e2epod.GetPodLogs(cs, ns, podClient, podClient)
		framework.ExpectNoError(err)
		framework.Logf("Pod client logs: %s", logs)

-		framework.Logf("Pod client connection %s:%d --> %s:%d", pod.Status.PodIP, srcPort, serverNodeInfo.nodeIP, udpService.Spec.Ports[0].NodePort)
-		// TODO(#91236): Remove once the test is debugged and fixed.
-		// dump conntrack table for debugging
-		dumpConntrack(cs)
-
		// Add a backend pod to the service in the other node
		ginkgo.By("creating a backend pod " + podBackend1 + " for the service " + serviceName)
		serverPod1 := newAgnhostPod(podBackend1, "netexec", fmt.Sprintf("--udp-port=%d", 80))
		serverPod1.Labels = udpJig.Labels
		serverPod1.Spec.NodeName = serverNodeInfo.name
-		pod1 := fr.PodClient().CreateSync(serverPod1)
+		fr.PodClient().CreateSync(serverPod1)

		// Waiting for service to expose endpoint.
		err = validateEndpointsPorts(cs, ns, serviceName, portsByPodName{podBackend1: {80}})
		framework.ExpectNoError(err, "failed to validate endpoints for service %s in namespace: %s", serviceName, ns)

-		framework.Logf("Pod client connection to backend1 after NAT %s:%d --> %s:80", pod.Status.PodIP, srcPort, pod1.Status.PodIP)
-		// TODO(#91236): Remove once the test is debugged and fixed.
-		// dump conntrack table for debugging
-		dumpConntrack(cs)
-
		// Note that the fact that Endpoints object already exists, does NOT mean
		// that iptables (or whatever else is used) was already programmed.
		// Additionally take into account that UDP conntract entries timeout is
@@ -186,7 +175,7 @@ var _ = SIGDescribe("Conntrack", func() {
		serverPod2 := newAgnhostPod(podBackend2, "netexec", fmt.Sprintf("--udp-port=%d", 80))
		serverPod2.Labels = udpJig.Labels
		serverPod2.Spec.NodeName = serverNodeInfo.name
-		pod2 := fr.PodClient().CreateSync(serverPod2)
+		fr.PodClient().CreateSync(serverPod2)

		// and delete the first pod
		framework.Logf("Cleaning up %s pod", podBackend1)
@@ -196,11 +185,6 @@ var _ = SIGDescribe("Conntrack", func() {
		err = validateEndpointsPorts(cs, ns, serviceName, portsByPodName{podBackend2: {80}})
		framework.ExpectNoError(err, "failed to validate endpoints for service %s in namespace: %s", serviceName, ns)

-		framework.Logf("Pod client connection to backend2 after NAT %s:%d --> %s:80", pod.Status.PodIP, srcPort, pod2.Status.PodIP)
-		// TODO(#91236): Remove once the test is debugged and fixed.
-		// dump conntrack table for debugging
-		dumpConntrack(cs)
-
		// Check that the second pod keeps receiving traffic
		// UDP conntrack entries timeout is 30 sec by default
		ginkgo.By("checking client pod connected to the backend 2 on Node IP " + serverNodeInfo.nodeIP)
@@ -216,7 +200,6 @@ var _ = SIGDescribe("Conntrack", func() {
		// TODO(#91236): Remove once the test is debugged and fixed.
		// dump conntrack table for debugging
		defer dumpConntrack(cs)
-		defer dumpIptables(cs)

		// Create a ClusterIP service
		udpJig := e2eservice.NewTestJig(cs, ns, serviceName)
@@ -236,32 +219,24 @@ var _ = SIGDescribe("Conntrack", func() {
		cmd := fmt.Sprintf(`date; for i in $(seq 1 3000); do echo "$(date) Try: ${i}"; echo hostname | nc -u -w 5 -p %d %s %d; echo; done`, srcPort, udpService.Spec.ClusterIP, udpService.Spec.Ports[0].Port)
		clientPod.Spec.Containers[0].Command = []string{"/bin/sh", "-c", cmd}
		clientPod.Spec.Containers[0].Name = podClient
-		pod := fr.PodClient().CreateSync(clientPod)
+		fr.PodClient().CreateSync(clientPod)
+
		// Read the client pod logs
		logs, err := e2epod.GetPodLogs(cs, ns, podClient, podClient)
		framework.ExpectNoError(err)
		framework.Logf("Pod client logs: %s", logs)

-		framework.Logf("Pod client connection %s:%d --> %s:%d", pod.Status.PodIP, srcPort, udpService.Spec.ClusterIP, udpService.Spec.Ports[0].Port)
-		// TODO(#91236): Remove once the test is debugged and fixed.
-		// dump conntrack table for debugging
-		dumpConntrack(cs)
-
		// Add a backend pod to the service in the other node
		ginkgo.By("creating a backend pod " + podBackend1 + " for the service " + serviceName)
		serverPod1 := newAgnhostPod(podBackend1, "netexec", fmt.Sprintf("--udp-port=%d", 80))
		serverPod1.Labels = udpJig.Labels
		serverPod1.Spec.NodeName = serverNodeInfo.name
-		pod1 := fr.PodClient().CreateSync(serverPod1)
+		fr.PodClient().CreateSync(serverPod1)
+
		// Waiting for service to expose endpoint.
		err = validateEndpointsPorts(cs, ns, serviceName, portsByPodName{podBackend1: {80}})
		framework.ExpectNoError(err, "failed to validate endpoints for service %s in namespace: %s", serviceName, ns)

-		framework.Logf("Pod client connection to backend1 after NAT %s:%d --> %s:80", pod.Status.PodIP, srcPort, pod1.Status.PodIP)
-		// TODO(#91236): Remove once the test is debugged and fixed.
-		// dump conntrack table for debugging
-		dumpConntrack(cs)
-
		// Note that the fact that Endpoints object already exists, does NOT mean
		// that iptables (or whatever else is used) was already programmed.
		// Additionally take into account that UDP conntract entries timeout is
@@ -280,7 +255,7 @@ var _ = SIGDescribe("Conntrack", func() {
		serverPod2 := newAgnhostPod(podBackend2, "netexec", fmt.Sprintf("--udp-port=%d", 80))
		serverPod2.Labels = udpJig.Labels
		serverPod2.Spec.NodeName = serverNodeInfo.name
-		pod2 := fr.PodClient().CreateSync(serverPod2)
+		fr.PodClient().CreateSync(serverPod2)

		// and delete the first pod
		framework.Logf("Cleaning up %s pod", podBackend1)
@@ -290,11 +265,6 @@ var _ = SIGDescribe("Conntrack", func() {
		err = validateEndpointsPorts(cs, ns, serviceName, portsByPodName{podBackend2: {80}})
		framework.ExpectNoError(err, "failed to validate endpoints for service %s in namespace: %s", serviceName, ns)

-		framework.Logf("Pod client connection to backend2 after NAT %s:%d --> %s:80", pod.Status.PodIP, srcPort, pod2.Status.PodIP)
-		// TODO(#91236): Remove once the test is debugged and fixed.
-		// dump conntrack table for debugging
-		dumpConntrack(cs)
-
		// Check that the second pod keeps receiving traffic
		// UDP conntrack entries timeout is 30 sec by default
		ginkgo.By("checking client pod connected to the backend 2 on Node IP " + serverNodeInfo.nodeIP)
@@ -315,8 +285,7 @@ func dumpConntrack(cs clientset.Interface) {
		framework.Logf("failed to list kube-proxy pods in namespace: %s", namespace)
		return
	}
-	// don't print DNS related entries
-	cmd := `conntrack -L -p udp | grep -v dport=53`
+	cmd := "conntrack -L"
	for _, pod := range pods.Items {
		if strings.Contains(pod.Name, "kube-proxy") {
			stdout, err := framework.RunHostCmd(namespace, pod.Name, cmd)
@@ -328,25 +297,3 @@ func dumpConntrack(cs clientset.Interface) {
		}
	}
}
-
-func dumpIptables(cs clientset.Interface) {
-	// Dump iptabes rules of each node for troubleshooting using the kube-proxy pods
-	namespace := "kube-system"
-	pods, err := cs.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{})
-	if err != nil || len(pods.Items) == 0 {
-		framework.Logf("failed to list kube-proxy pods in namespace: %s", namespace)
-		return
-	}
-
-	cmd := "iptables-save"
-	for _, pod := range pods.Items {
-		if strings.Contains(pod.Name, "kube-proxy") {
-			stdout, err := framework.RunHostCmd(namespace, pod.Name, cmd)
-			if err != nil {
-				framework.Logf("Failed to dump iptables rules of node %s: %v", pod.Spec.NodeName, err)
-				continue
-			}
-			framework.Logf("iptables rules of node %s: %s", pod.Spec.NodeName, stdout)
-		}
-	}
-}