
Commit fcba5fe

Merge pull request kubernetes#94800 from mborsz/revert-92643-connDebug
Revert "conntrack e2e test debug connections"
2 parents 0a02eda + bb6d0f9 commit fcba5fe

1 file changed: 9 additions and 62 deletions


test/e2e/network/conntrack.go

Lines changed: 9 additions & 62 deletions
@@ -120,7 +120,6 @@ var _ = SIGDescribe("Conntrack", func() {
         // TODO(#91236): Remove once the test is debugged and fixed.
         // dump conntrack table for debugging
         defer dumpConntrack(cs)
-        defer dumpIptables(cs)

         // Create a NodePort service
         udpJig := e2eservice.NewTestJig(cs, ns, serviceName)
@@ -140,34 +139,24 @@ var _ = SIGDescribe("Conntrack", func() {
         cmd := fmt.Sprintf(`date; for i in $(seq 1 3000); do echo "$(date) Try: ${i}"; echo hostname | nc -u -w 5 -p %d %s %d; echo; done`, srcPort, serverNodeInfo.nodeIP, udpService.Spec.Ports[0].NodePort)
         clientPod.Spec.Containers[0].Command = []string{"/bin/sh", "-c", cmd}
         clientPod.Spec.Containers[0].Name = podClient
-        pod := fr.PodClient().CreateSync(clientPod)
+        fr.PodClient().CreateSync(clientPod)

         // Read the client pod logs
         logs, err := e2epod.GetPodLogs(cs, ns, podClient, podClient)
         framework.ExpectNoError(err)
         framework.Logf("Pod client logs: %s", logs)

-        framework.Logf("Pod client connection %s:%d --> %s:%d", pod.Status.PodIP, srcPort, serverNodeInfo.nodeIP, udpService.Spec.Ports[0].NodePort)
-        // TODO(#91236): Remove once the test is debugged and fixed.
-        // dump conntrack table for debugging
-        dumpConntrack(cs)
-
         // Add a backend pod to the service in the other node
         ginkgo.By("creating a backend pod " + podBackend1 + " for the service " + serviceName)
         serverPod1 := newAgnhostPod(podBackend1, "netexec", fmt.Sprintf("--udp-port=%d", 80))
         serverPod1.Labels = udpJig.Labels
         serverPod1.Spec.NodeName = serverNodeInfo.name
-        pod1 := fr.PodClient().CreateSync(serverPod1)
+        fr.PodClient().CreateSync(serverPod1)

         // Waiting for service to expose endpoint.
         err = validateEndpointsPorts(cs, ns, serviceName, portsByPodName{podBackend1: {80}})
         framework.ExpectNoError(err, "failed to validate endpoints for service %s in namespace: %s", serviceName, ns)

-        framework.Logf("Pod client connection to backend1 after NAT %s:%d --> %s:80", pod.Status.PodIP, srcPort, pod1.Status.PodIP)
-        // TODO(#91236): Remove once the test is debugged and fixed.
-        // dump conntrack table for debugging
-        dumpConntrack(cs)
-
         // Note that the fact that Endpoints object already exists, does NOT mean
         // that iptables (or whatever else is used) was already programmed.
         // Additionally take into account that UDP conntract entries timeout is
@@ -186,7 +175,7 @@ var _ = SIGDescribe("Conntrack", func() {
         serverPod2 := newAgnhostPod(podBackend2, "netexec", fmt.Sprintf("--udp-port=%d", 80))
         serverPod2.Labels = udpJig.Labels
         serverPod2.Spec.NodeName = serverNodeInfo.name
-        pod2 := fr.PodClient().CreateSync(serverPod2)
+        fr.PodClient().CreateSync(serverPod2)

         // and delete the first pod
         framework.Logf("Cleaning up %s pod", podBackend1)
@@ -196,11 +185,6 @@ var _ = SIGDescribe("Conntrack", func() {
         err = validateEndpointsPorts(cs, ns, serviceName, portsByPodName{podBackend2: {80}})
         framework.ExpectNoError(err, "failed to validate endpoints for service %s in namespace: %s", serviceName, ns)

-        framework.Logf("Pod client connection to backend2 after NAT %s:%d --> %s:80", pod.Status.PodIP, srcPort, pod2.Status.PodIP)
-        // TODO(#91236): Remove once the test is debugged and fixed.
-        // dump conntrack table for debugging
-        dumpConntrack(cs)
-
         // Check that the second pod keeps receiving traffic
         // UDP conntrack entries timeout is 30 sec by default
         ginkgo.By("checking client pod connected to the backend 2 on Node IP " + serverNodeInfo.nodeIP)
@@ -216,7 +200,6 @@ var _ = SIGDescribe("Conntrack", func() {
         // TODO(#91236): Remove once the test is debugged and fixed.
         // dump conntrack table for debugging
         defer dumpConntrack(cs)
-        defer dumpIptables(cs)

         // Create a ClusterIP service
         udpJig := e2eservice.NewTestJig(cs, ns, serviceName)
@@ -236,32 +219,24 @@ var _ = SIGDescribe("Conntrack", func() {
         cmd := fmt.Sprintf(`date; for i in $(seq 1 3000); do echo "$(date) Try: ${i}"; echo hostname | nc -u -w 5 -p %d %s %d; echo; done`, srcPort, udpService.Spec.ClusterIP, udpService.Spec.Ports[0].Port)
         clientPod.Spec.Containers[0].Command = []string{"/bin/sh", "-c", cmd}
         clientPod.Spec.Containers[0].Name = podClient
-        pod := fr.PodClient().CreateSync(clientPod)
+        fr.PodClient().CreateSync(clientPod)
+
         // Read the client pod logs
         logs, err := e2epod.GetPodLogs(cs, ns, podClient, podClient)
         framework.ExpectNoError(err)
         framework.Logf("Pod client logs: %s", logs)

-        framework.Logf("Pod client connection %s:%d --> %s:%d", pod.Status.PodIP, srcPort, udpService.Spec.ClusterIP, udpService.Spec.Ports[0].Port)
-        // TODO(#91236): Remove once the test is debugged and fixed.
-        // dump conntrack table for debugging
-        dumpConntrack(cs)
-
         // Add a backend pod to the service in the other node
         ginkgo.By("creating a backend pod " + podBackend1 + " for the service " + serviceName)
         serverPod1 := newAgnhostPod(podBackend1, "netexec", fmt.Sprintf("--udp-port=%d", 80))
         serverPod1.Labels = udpJig.Labels
         serverPod1.Spec.NodeName = serverNodeInfo.name
-        pod1 := fr.PodClient().CreateSync(serverPod1)
+        fr.PodClient().CreateSync(serverPod1)
+
         // Waiting for service to expose endpoint.
         err = validateEndpointsPorts(cs, ns, serviceName, portsByPodName{podBackend1: {80}})
         framework.ExpectNoError(err, "failed to validate endpoints for service %s in namespace: %s", serviceName, ns)

-        framework.Logf("Pod client connection to backend1 after NAT %s:%d --> %s:80", pod.Status.PodIP, srcPort, pod1.Status.PodIP)
-        // TODO(#91236): Remove once the test is debugged and fixed.
-        // dump conntrack table for debugging
-        dumpConntrack(cs)
-
         // Note that the fact that Endpoints object already exists, does NOT mean
         // that iptables (or whatever else is used) was already programmed.
         // Additionally take into account that UDP conntract entries timeout is
@@ -280,7 +255,7 @@ var _ = SIGDescribe("Conntrack", func() {
         serverPod2 := newAgnhostPod(podBackend2, "netexec", fmt.Sprintf("--udp-port=%d", 80))
         serverPod2.Labels = udpJig.Labels
         serverPod2.Spec.NodeName = serverNodeInfo.name
-        pod2 := fr.PodClient().CreateSync(serverPod2)
+        fr.PodClient().CreateSync(serverPod2)

         // and delete the first pod
         framework.Logf("Cleaning up %s pod", podBackend1)
@@ -290,11 +265,6 @@ var _ = SIGDescribe("Conntrack", func() {
         err = validateEndpointsPorts(cs, ns, serviceName, portsByPodName{podBackend2: {80}})
         framework.ExpectNoError(err, "failed to validate endpoints for service %s in namespace: %s", serviceName, ns)

-        framework.Logf("Pod client connection to backend2 after NAT %s:%d --> %s:80", pod.Status.PodIP, srcPort, pod2.Status.PodIP)
-        // TODO(#91236): Remove once the test is debugged and fixed.
-        // dump conntrack table for debugging
-        dumpConntrack(cs)
-
         // Check that the second pod keeps receiving traffic
         // UDP conntrack entries timeout is 30 sec by default
         ginkgo.By("checking client pod connected to the backend 2 on Node IP " + serverNodeInfo.nodeIP)
@@ -315,8 +285,7 @@ func dumpConntrack(cs clientset.Interface) {
         framework.Logf("failed to list kube-proxy pods in namespace: %s", namespace)
         return
     }
-    // don't print DNS related entries
-    cmd := `conntrack -L -p udp | grep -v dport=53`
+    cmd := "conntrack -L"
     for _, pod := range pods.Items {
         if strings.Contains(pod.Name, "kube-proxy") {
             stdout, err := framework.RunHostCmd(namespace, pod.Name, cmd)
@@ -328,25 +297,3 @@ func dumpConntrack(cs clientset.Interface) {
         }
     }
 }
-
-func dumpIptables(cs clientset.Interface) {
-    // Dump iptabes rules of each node for troubleshooting using the kube-proxy pods
-    namespace := "kube-system"
-    pods, err := cs.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{})
-    if err != nil || len(pods.Items) == 0 {
-        framework.Logf("failed to list kube-proxy pods in namespace: %s", namespace)
-        return
-    }
-
-    cmd := "iptables-save"
-    for _, pod := range pods.Items {
-        if strings.Contains(pod.Name, "kube-proxy") {
-            stdout, err := framework.RunHostCmd(namespace, pod.Name, cmd)
-            if err != nil {
-                framework.Logf("Failed to dump iptables rules of node %s: %v", pod.Spec.NodeName, err)
-                continue
-            }
-            framework.Logf("iptables rules of node %s: %s", pod.Spec.NodeName, stdout)
-        }
-    }
-}
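
After this revert, the only debug helper left in the test file is dumpConntrack, which runs an unfiltered conntrack listing inside each kube-proxy pod. The hunks above show only fragments of that function, so the sketch below of how it reads after the revert fills in the unshown opening and error-handling lines by assumption (modeled on the removed dumpIptables helper); it is not a verbatim copy of the file.

// Sketch (not verbatim): dumpConntrack as it reads after this revert.
// Lines not visible in the diff above are assumptions mirroring the
// removed dumpIptables helper.
package network

import (
    "context"
    "strings"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    clientset "k8s.io/client-go/kubernetes"

    "k8s.io/kubernetes/test/e2e/framework"
)

func dumpConntrack(cs clientset.Interface) {
    // Dump the conntrack table of each node using the kube-proxy pods.
    namespace := "kube-system" // assumed, as in the removed dumpIptables
    pods, err := cs.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{})
    if err != nil || len(pods.Items) == 0 {
        framework.Logf("failed to list kube-proxy pods in namespace: %s", namespace)
        return
    }
    cmd := "conntrack -L" // the revert drops the `-p udp | grep -v dport=53` filter
    for _, pod := range pods.Items {
        if strings.Contains(pod.Name, "kube-proxy") {
            stdout, err := framework.RunHostCmd(namespace, pod.Name, cmd)
            if err != nil {
                // error handling assumed to mirror the removed dumpIptables helper
                framework.Logf("Failed to dump conntrack table of node %s: %v", pod.Spec.NodeName, err)
                continue
            }
            framework.Logf("conntrack table of node %s: %s", pod.Spec.NodeName, stdout)
        }
    }
}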
