Commit 7a9e1fb

Merge pull request kubernetes#76218 from anfernee/regression-issue-74839
Add e2e for connection reset issue
2 parents e302d8d + 5168e62 commit 7a9e1fb

File tree: 1 file changed (+112, −1)


test/e2e/network/kube_proxy.go

Lines changed: 112 additions & 1 deletion

@@ -24,7 +24,7 @@ import (
     "strings"
     "time"
 
-    "k8s.io/api/core/v1"
+    v1 "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 
     "k8s.io/kubernetes/test/e2e/framework"
@@ -215,4 +215,115 @@ var _ = SIGDescribe("Network", func() {
        Expect(math.Abs(float64(timeoutSeconds - expectedTimeoutSeconds))).Should(
            BeNumerically("<", (epsilonSeconds)))
    })
+
+   // Regression test for #74839, where:
+   // Packets considered INVALID by conntrack are now dropped. In particular, this fixes
+   // a problem where spurious retransmits in a long-running TCP connection to a service
+   // IP could result in the connection being closed with the error "Connection reset by
+   // peer"
+   It("should resolve connection reset issue #74839 [Slow]", func() {
+       serverLabel := map[string]string{
+           "app": "boom-server",
+       }
+       clientLabel := map[string]string{
+           "app": "client",
+       }
+
+       serverPod := &v1.Pod{
+           ObjectMeta: metav1.ObjectMeta{
+               Name:   "boom-server",
+               Labels: serverLabel,
+           },
+           Spec: v1.PodSpec{
+               Containers: []v1.Container{
+                   {
+                       Name:  "boom-server",
+                       Image: "gcr.io/kubernetes-e2e-test-images/regression-issue-74839-amd64:1.0",
+                       Ports: []v1.ContainerPort{
+                           {
+                               ContainerPort: 9000, // Default port exposed by boom-server
+                           },
+                       },
+                   },
+               },
+               Affinity: &v1.Affinity{
+                   PodAntiAffinity: &v1.PodAntiAffinity{
+                       RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
+                           {
+                               LabelSelector: &metav1.LabelSelector{
+                                   MatchLabels: clientLabel,
+                               },
+                               TopologyKey: "kubernetes.io/hostname",
+                           },
+                       },
+                   },
+               },
+           },
+       }
+       _, err := fr.ClientSet.CoreV1().Pods(fr.Namespace.Name).Create(serverPod)
+       framework.ExpectNoError(err)
+
+       By("Server pod created")
+
+       svc := &v1.Service{
+           ObjectMeta: metav1.ObjectMeta{
+               Name: "boom-server",
+           },
+           Spec: v1.ServiceSpec{
+               Selector: serverLabel,
+               Ports: []v1.ServicePort{
+                   {
+                       Protocol: v1.ProtocolTCP,
+                       Port:     9000,
+                   },
+               },
+           },
+       }
+       _, err = fr.ClientSet.CoreV1().Services(fr.Namespace.Name).Create(svc)
+       framework.ExpectNoError(err)
+
+       By("Server service created")
+
+       pod := &v1.Pod{
+           ObjectMeta: metav1.ObjectMeta{
+               Name:   "startup-script",
+               Labels: clientLabel,
+           },
+           Spec: v1.PodSpec{
+               Containers: []v1.Container{
+                   {
+                       Name:  "startup-script",
+                       Image: "gcr.io/google-containers/startup-script:v1",
+                       Command: []string{
+                           "bash", "-c", "while true; do sleep 2; nc boom-server 9000& done",
+                       },
+                   },
+               },
+               Affinity: &v1.Affinity{
+                   PodAntiAffinity: &v1.PodAntiAffinity{
+                       RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
+                           {
+                               LabelSelector: &metav1.LabelSelector{
+                                   MatchLabels: serverLabel,
+                               },
+                               TopologyKey: "kubernetes.io/hostname",
+                           },
+                       },
+                   },
+               },
+               RestartPolicy: v1.RestartPolicyNever,
+           },
+       }
+       _, err = fr.ClientSet.CoreV1().Pods(fr.Namespace.Name).Create(pod)
+       framework.ExpectNoError(err)
+
+       By("Client pod created")
+
+       for i := 0; i < 20; i++ {
+           time.Sleep(3 * time.Second)
+           resultPod, err := fr.ClientSet.CoreV1().Pods(fr.Namespace.Name).Get(serverPod.Name, metav1.GetOptions{})
+           framework.ExpectNoError(err)
+           Expect(resultPod.Status.ContainerStatuses[0].LastTerminationState.Terminated).Should(BeNil())
+       }
+   })
 })
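
For context on what this test guards: the fix for kubernetes#74839 makes packets that conntrack classifies as INVALID (for example, out-of-window TCP segments, which the boom-server image is apparently built to provoke) get dropped rather than answered with a RST. The snippet below is a minimal, illustrative sketch of that mechanism under stated assumptions, not kube-proxy's actual implementation: it assumes a Linux host with root privileges and an iptables binary on PATH, and installs a standalone rule similar in spirit to the one the fix programs into kube-proxy's forwarding rules.

package main

import (
    "fmt"
    "log"
    "os/exec"
)

func main() {
    // Drop forwarded packets that conntrack considers INVALID (e.g. TCP
    // segments outside the tracked window). Without such a rule, the host
    // can reply to them with a RST, resetting otherwise-healthy connections
    // to a service IP; that reset is the symptom the e2e test above checks for.
    // Hypothetical standalone rule for illustration only.
    args := []string{"-A", "FORWARD", "-m", "conntrack", "--ctstate", "INVALID", "-j", "DROP"}
    out, err := exec.Command("iptables", args...).CombinedOutput()
    if err != nil {
        log.Fatalf("iptables %v failed: %v: %s", args, err, out)
    }
    fmt.Printf("installed rule: iptables %v\n", args)
}

On a cluster that already contains the fix, the client's repeated nc probes never trigger a reset, so the boom-server container keeps running and the polling loop at the end of the test passes. On an unpatched cluster the server container presumably exits when it observes a reset, leaving a termination record that the LastTerminationState assertion catches.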
