@@ -120,6 +120,7 @@ var _ = SIGDescribe("Conntrack", func() {
 		// TODO(#91236): Remove once the test is debugged and fixed.
 		// dump conntrack table for debugging
 		defer dumpConntrack(cs)
+		defer dumpIptables(cs)
 
 		// Create a NodePort service
 		udpJig := e2eservice.NewTestJig(cs, ns, serviceName)
@@ -139,24 +140,34 @@ var _ = SIGDescribe("Conntrack", func() {
 		cmd := fmt.Sprintf(`date; for i in $(seq 1 3000); do echo "$(date) Try: ${i}"; echo hostname | nc -u -w 5 -p %d %s %d; echo; done`, srcPort, serverNodeInfo.nodeIP, udpService.Spec.Ports[0].NodePort)
 		clientPod.Spec.Containers[0].Command = []string{"/bin/sh", "-c", cmd}
 		clientPod.Spec.Containers[0].Name = podClient
-		fr.PodClient().CreateSync(clientPod)
+		pod := fr.PodClient().CreateSync(clientPod)
 
 		// Read the client pod logs
 		logs, err := e2epod.GetPodLogs(cs, ns, podClient, podClient)
 		framework.ExpectNoError(err)
 		framework.Logf("Pod client logs: %s", logs)
 
+		framework.Logf("Pod client connection %s:%d --> %s:%d", pod.Status.PodIP, srcPort, serverNodeInfo.nodeIP, udpService.Spec.Ports[0].NodePort)
+		// TODO(#91236): Remove once the test is debugged and fixed.
+		// dump conntrack table for debugging
+		dumpConntrack(cs)
+
 		// Add a backend pod to the service in the other node
 		ginkgo.By("creating a backend pod " + podBackend1 + " for the service " + serviceName)
 		serverPod1 := newAgnhostPod(podBackend1, "netexec", fmt.Sprintf("--udp-port=%d", 80))
 		serverPod1.Labels = udpJig.Labels
 		serverPod1.Spec.NodeName = serverNodeInfo.name
-		fr.PodClient().CreateSync(serverPod1)
+		pod1 := fr.PodClient().CreateSync(serverPod1)
 
 		// Waiting for service to expose endpoint.
 		err = validateEndpointsPorts(cs, ns, serviceName, portsByPodName{podBackend1: {80}})
 		framework.ExpectNoError(err, "failed to validate endpoints for service %s in namespace: %s", serviceName, ns)
 
+		framework.Logf("Pod client connection to backend1 after NAT %s:%d --> %s:80", pod.Status.PodIP, srcPort, pod1.Status.PodIP)
+		// TODO(#91236): Remove once the test is debugged and fixed.
+		// dump conntrack table for debugging
+		dumpConntrack(cs)
+
 		// Note that the fact that Endpoints object already exists, does NOT mean
 		// that iptables (or whatever else is used) was already programmed.
 		// Additionally take into account that UDP conntrack entries timeout is
@@ -175,7 +186,7 @@ var _ = SIGDescribe("Conntrack", func() {
 		serverPod2 := newAgnhostPod(podBackend2, "netexec", fmt.Sprintf("--udp-port=%d", 80))
 		serverPod2.Labels = udpJig.Labels
 		serverPod2.Spec.NodeName = serverNodeInfo.name
-		fr.PodClient().CreateSync(serverPod2)
+		pod2 := fr.PodClient().CreateSync(serverPod2)
 
 		// and delete the first pod
 		framework.Logf("Cleaning up %s pod", podBackend1)
@@ -185,6 +196,11 @@ var _ = SIGDescribe("Conntrack", func() {
 		err = validateEndpointsPorts(cs, ns, serviceName, portsByPodName{podBackend2: {80}})
 		framework.ExpectNoError(err, "failed to validate endpoints for service %s in namespace: %s", serviceName, ns)
 
+		framework.Logf("Pod client connection to backend2 after NAT %s:%d --> %s:80", pod.Status.PodIP, srcPort, pod2.Status.PodIP)
+		// TODO(#91236): Remove once the test is debugged and fixed.
+		// dump conntrack table for debugging
+		dumpConntrack(cs)
+
 		// Check that the second pod keeps receiving traffic
 		// UDP conntrack entries timeout is 30 sec by default
 		ginkgo.By("checking client pod connected to the backend 2 on Node IP " + serverNodeInfo.nodeIP)
@@ -200,6 +216,7 @@ var _ = SIGDescribe("Conntrack", func() {
 		// TODO(#91236): Remove once the test is debugged and fixed.
 		// dump conntrack table for debugging
 		defer dumpConntrack(cs)
+		defer dumpIptables(cs)
 
 		// Create a ClusterIP service
 		udpJig := e2eservice.NewTestJig(cs, ns, serviceName)
@@ -219,24 +236,32 @@ var _ = SIGDescribe("Conntrack", func() {
 		cmd := fmt.Sprintf(`date; for i in $(seq 1 3000); do echo "$(date) Try: ${i}"; echo hostname | nc -u -w 5 -p %d %s %d; echo; done`, srcPort, udpService.Spec.ClusterIP, udpService.Spec.Ports[0].Port)
 		clientPod.Spec.Containers[0].Command = []string{"/bin/sh", "-c", cmd}
 		clientPod.Spec.Containers[0].Name = podClient
-		fr.PodClient().CreateSync(clientPod)
-
+		pod := fr.PodClient().CreateSync(clientPod)
 		// Read the client pod logs
 		logs, err := e2epod.GetPodLogs(cs, ns, podClient, podClient)
 		framework.ExpectNoError(err)
 		framework.Logf("Pod client logs: %s", logs)
 
+		framework.Logf("Pod client connection %s:%d --> %s:%d", pod.Status.PodIP, srcPort, udpService.Spec.ClusterIP, udpService.Spec.Ports[0].Port)
+		// TODO(#91236): Remove once the test is debugged and fixed.
+		// dump conntrack table for debugging
+		dumpConntrack(cs)
+
 		// Add a backend pod to the service in the other node
 		ginkgo.By("creating a backend pod " + podBackend1 + " for the service " + serviceName)
 		serverPod1 := newAgnhostPod(podBackend1, "netexec", fmt.Sprintf("--udp-port=%d", 80))
 		serverPod1.Labels = udpJig.Labels
 		serverPod1.Spec.NodeName = serverNodeInfo.name
-		fr.PodClient().CreateSync(serverPod1)
-
+		pod1 := fr.PodClient().CreateSync(serverPod1)
 		// Waiting for service to expose endpoint.
 		err = validateEndpointsPorts(cs, ns, serviceName, portsByPodName{podBackend1: {80}})
 		framework.ExpectNoError(err, "failed to validate endpoints for service %s in namespace: %s", serviceName, ns)
 
+		framework.Logf("Pod client connection to backend1 after NAT %s:%d --> %s:80", pod.Status.PodIP, srcPort, pod1.Status.PodIP)
+		// TODO(#91236): Remove once the test is debugged and fixed.
+		// dump conntrack table for debugging
+		dumpConntrack(cs)
+
 		// Note that the fact that Endpoints object already exists, does NOT mean
 		// that iptables (or whatever else is used) was already programmed.
 		// Additionally take into account that UDP conntrack entries timeout is
@@ -255,7 +280,7 @@ var _ = SIGDescribe("Conntrack", func() {
 		serverPod2 := newAgnhostPod(podBackend2, "netexec", fmt.Sprintf("--udp-port=%d", 80))
 		serverPod2.Labels = udpJig.Labels
 		serverPod2.Spec.NodeName = serverNodeInfo.name
-		fr.PodClient().CreateSync(serverPod2)
+		pod2 := fr.PodClient().CreateSync(serverPod2)
 
 		// and delete the first pod
 		framework.Logf("Cleaning up %s pod", podBackend1)
@@ -265,6 +290,11 @@ var _ = SIGDescribe("Conntrack", func() {
 		err = validateEndpointsPorts(cs, ns, serviceName, portsByPodName{podBackend2: {80}})
 		framework.ExpectNoError(err, "failed to validate endpoints for service %s in namespace: %s", serviceName, ns)
 
+		framework.Logf("Pod client connection to backend2 after NAT %s:%d --> %s:80", pod.Status.PodIP, srcPort, pod2.Status.PodIP)
+		// TODO(#91236): Remove once the test is debugged and fixed.
+		// dump conntrack table for debugging
+		dumpConntrack(cs)
+
 		// Check that the second pod keeps receiving traffic
 		// UDP conntrack entries timeout is 30 sec by default
 		ginkgo.By("checking client pod connected to the backend 2 on Node IP " + serverNodeInfo.nodeIP)
@@ -285,7 +315,8 @@ func dumpConntrack(cs clientset.Interface) {
 		framework.Logf("failed to list kube-proxy pods in namespace: %s", namespace)
 		return
 	}
-	cmd := "conntrack -L"
+	// don't print DNS related entries
+	cmd := `conntrack -L -p udp | grep -v dport=53`
 	for _, pod := range pods.Items {
 		if strings.Contains(pod.Name, "kube-proxy") {
 			stdout, err := framework.RunHostCmd(namespace, pod.Name, cmd)
@@ -297,3 +328,25 @@ func dumpConntrack(cs clientset.Interface) {
 		}
 	}
 }
+
+func dumpIptables(cs clientset.Interface) {
+	// Dump iptables rules of each node for troubleshooting using the kube-proxy pods
+	namespace := "kube-system"
+	pods, err := cs.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{})
+	if err != nil || len(pods.Items) == 0 {
+		framework.Logf("failed to list kube-proxy pods in namespace: %s", namespace)
+		return
+	}
+
+	cmd := "iptables-save"
+	for _, pod := range pods.Items {
+		if strings.Contains(pod.Name, "kube-proxy") {
+			stdout, err := framework.RunHostCmd(namespace, pod.Name, cmd)
+			if err != nil {
+				framework.Logf("Failed to dump iptables rules of node %s: %v", pod.Spec.NodeName, err)
+				continue
+			}
+			framework.Logf("iptables rules of node %s: %s", pod.Spec.NodeName, stdout)
+		}
+	}
+}