Skip to content

Commit 48dda0e

Browse files
authored
Fix flaky TestWaypointAsEgressGateway (#58124)
* Fix flaky TestWaypointAsEgressGateway

Signed-off-by: Jackie Elliott <[email protected]>

* Update servicesForSubsets to set Hostname using a pod in the subset

Signed-off-by: Jackie Elliott <[email protected]>

---------

Signed-off-by: Jackie Elliott <[email protected]>
1 parent c93fe67 commit 48dda0e

File tree

1 file changed

+41
-37
lines changed

1 file changed

+41
-37
lines changed

tests/integration/ambient/waypoint_test.go

Lines changed: 41 additions & 37 deletions
Original file line number | Diff line number | Diff line change
@@ -27,6 +27,7 @@ import (
2727

2828
corev1 "k8s.io/api/core/v1"
2929
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
30+
"k8s.io/apimachinery/pkg/labels"
3031
"k8s.io/apimachinery/pkg/types"
3132
k8s "sigs.k8s.io/gateway-api/apis/v1"
3233

@@ -464,6 +465,7 @@ type externalSubsetService struct {
464465
}
465466

466467
// servicesForSubsets is a helper function to create a kubernetes service for all subsets of a service
468+
// Assumes a single pod per subset
467469
func servicesForSubsets(t framework.TestContext, instance echo.Instance) []externalSubsetService {
468470
ns := instance.Config().Namespace.Name()
469471
services := []externalSubsetService{}
@@ -495,13 +497,27 @@ func servicesForSubsets(t framework.TestContext, instance echo.Instance) []exter
495497
if err != nil {
496498
t.Fatalf("failed to create service %s: %v", svc.Name, err)
497499
}
498-
t.Cleanup(func() {
500+
501+
t.CleanupConditionally(func() {
499502
t.Clusters().Default().Kube().CoreV1().Services(ns).Delete(context.TODO(), s.Name, metav1.DeleteOptions{})
500503
})
504+
505+
pods, err := t.Clusters().Default().Kube().CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{
506+
LabelSelector: labels.SelectorFromSet(map[string]string{
507+
"app": instance.Config().Service,
508+
"version": subset.Version,
509+
}).String(),
510+
})
511+
if err != nil {
512+
t.Fatalf("failed to list pods: %v", err)
513+
}
514+
if len(pods.Items) != 1 {
515+
t.Fatalf("expected 1 pod found for service %s, got %d", svc.Name, len(pods.Items))
516+
}
501517
services = append(services, externalSubsetService{
502518
Name: s.Name,
503519
SubsetKey: subset.Version,
504-
Hostname: instance.WorkloadsOrFail(t)[0].PodName(),
520+
Hostname: pods.Items[0].Name,
505521
ClusterIP: s.Spec.ClusterIP,
506522
})
507523
}
@@ -567,12 +583,8 @@ spec:
567583
t.Skip("not enough external service instances")
568584
}
569585

570-
subsetServices := servicesForSubsets(t, external)
571-
externalIPs := []string{}
572-
for _, s := range subsetServices {
573-
externalIPs = append(externalIPs, s.ClusterIP)
574-
}
575-
586+
// Use the first subset service for testing consistency
587+
subsetService := servicesForSubsets(t, external)[0]
576588
resolutionNoneServiceEntry := `apiVersion: networking.istio.io/v1
577589
kind: ServiceEntry
578590
metadata:
@@ -590,46 +602,38 @@ spec:
590602
resolution: NONE
591603
location: MESH_EXTERNAL
592604
addresses:
593-
{{ range $ip := .IPs }}
594-
- "{{$ip}}"
595-
{{ end }}
605+
- {{.IP}}
596606
`
597607
t.ConfigIstio().
598608
Eval(apps.Namespace.Name(), map[string]any{
599609
"EgressNamespace": egressNamespace.Name(),
600-
"IPs": externalIPs,
610+
"IP": subsetService.ClusterIP,
601611
}, resolutionNoneServiceEntry).
602612
ApplyOrFail(t, apply.CleanupConditionally)
603613

604614
hostHeader := http.Header{}
605615
hostHeader.Set("Host", "fake-passthrough.example.com")
606616

607617
// Test that we send traffic through the waypoint successfully
608-
for i, sss := range subsetServices {
609-
if i != 0 {
610-
// Presently, only the first IP will be used by the waypoint proxy.
611-
continue
612-
}
613-
// The setup for this testing would be tricky in multi-network because the VIPs being used
614-
// are not in the mesh, but they are in a cluster.
615-
// This means each cluster's VIPs would need to be unique.
616-
// That isn't useful for testing though because it's just turning the multi-cluster
617-
// tests into multiple single-cluster tests.
618-
// Arguably, egress gateways should never be accessed across a cluster-boundary,
619-
// so perhaps the skips need not be removed as even in multi-cluster testing we expect egress
620-
// to behave as though it is single-cluster.
621-
testName := fmt.Sprintf("resolution none %s", sss.ClusterIP)
622-
runTest(t, testName, "",
623-
"relies on unmeshed ClusterIPs as a simulated external service IP",
624-
echo.CallOptions{
625-
Address: sss.ClusterIP,
626-
HTTP: echo.HTTP{Headers: hostHeader},
627-
Port: echo.Port{ServicePort: 8080},
628-
Scheme: scheme.HTTP,
629-
Count: 1,
630-
Check: check.And(check.OK(), IsL7(), check.Hostname(sss.Hostname)),
631-
})
632-
}
618+
// The setup for this testing would be tricky in multi-network because the VIPs being used
619+
// are not in the mesh, but they are in a cluster.
620+
// This means each cluster's VIPs would need to be unique.
621+
// That isn't useful for testing though because it's just turning the multi-cluster
622+
// tests into multiple single-cluster tests.
623+
// Arguably, egress gateways should never be accessed across a cluster-boundary,
624+
// so perhaps the skips need not be removed as even in multi-cluster testing we expect egress
625+
// to behave as though it is single-cluster.
626+
testName := fmt.Sprintf("resolution none %s", subsetService.ClusterIP)
627+
runTest(t, testName, "",
628+
"relies on unmeshed ClusterIPs as a simulated external service IP",
629+
echo.CallOptions{
630+
Address: subsetService.ClusterIP,
631+
HTTP: echo.HTTP{Headers: hostHeader},
632+
Port: echo.Port{ServicePort: 8080},
633+
Scheme: scheme.HTTP,
634+
Count: 1,
635+
Check: check.And(check.OK(), IsL7(), check.Hostname(subsetService.Hostname)),
636+
})
633637

634638
service := `apiVersion: networking.istio.io/v1
635639
kind: ServiceEntry

0 commit comments

Comments
 (0)