Commit 176f74e

Merge pull request kubernetes#85909 from tnqn/failing-networkpolicy-test
Fix a failing test in NetworkPolicy e2e
2 parents: 4bb8df4 + 55b6870

File tree

1 file changed: +94 -49 lines changed

test/e2e/network/network_policy.go

Lines changed: 94 additions & 49 deletions
@@ -853,20 +853,58 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() {
 			defer cleanupNetworkPolicy(f, policy)
 
 			ginkgo.By(fmt.Sprintf("Creating client pod %s that should not be able to connect to %s.", "client-a", service.Name))
-			podClient := createNetworkClientPod(f, f.Namespace, "client-a", service, allowedPort)
+			// Specify RestartPolicy to OnFailure so we can check the client pod fails in the beginning and succeeds
+			// after updating its label, otherwise it would not restart after the first failure.
+			podClient := createNetworkClientPodWithRestartPolicy(f, f.Namespace, "client-a", service, allowedPort, v1.RestartPolicyOnFailure)
 			defer func() {
 				ginkgo.By(fmt.Sprintf("Cleaning up the pod %s", podClient.Name))
 				if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), podClient.Name, nil); err != nil {
 					framework.Failf("unable to cleanup pod %v: %v", podClient.Name, err)
 				}
 			}()
-			checkNoConnectivity(f, f.Namespace, podClient, service)
+			// Check Container exit code as restartable Pod's Phase will be Running even when container fails.
+			checkNoConnectivityByExitCode(f, f.Namespace, podClient, service)
 
 			ginkgo.By(fmt.Sprintf("Updating client pod %s that should successfully connect to %s.", podClient.Name, service.Name))
-			podClient = updateNetworkClientPodLabel(f, f.Namespace, podClient.Name, "replace", "/metadata/labels", map[string]string{})
+			podClient = updatePodLabel(f, f.Namespace, podClient.Name, "replace", "/metadata/labels", map[string]string{})
 			checkConnectivity(f, f.Namespace, podClient, service)
 		})
 
+		ginkgo.It("should deny ingress access to updated pod [Feature:NetworkPolicy]", func() {
+			const allowedPort = 80
+			ginkgo.By("Creating a network policy for the server which denies all traffic.")
+			policy := &networkingv1.NetworkPolicy{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "deny-ingress-via-isolated-label-selector",
+				},
+				Spec: networkingv1.NetworkPolicySpec{
+					PodSelector: metav1.LabelSelector{
+						MatchLabels: map[string]string{
+							"pod-name": podServerLabelSelector,
+						},
+						MatchExpressions: []metav1.LabelSelectorRequirement{{
+							Key:      "isolated",
+							Operator: metav1.LabelSelectorOpExists,
+						}},
+					},
+					PolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeIngress},
+					Ingress:     []networkingv1.NetworkPolicyIngressRule{},
+				},
+			}
+
+			policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{})
+			framework.ExpectNoError(err, "Error creating Network Policy %v: %v", policy.ObjectMeta.Name, err)
+			defer cleanupNetworkPolicy(f, policy)
+
+			// Client can connect to service when the network policy doesn't apply to the server pod.
+			testCanConnect(f, f.Namespace, "client-a", service, allowedPort)
+
+			// Client cannot connect to service after updating the server pod's labels to match the network policy's selector.
+			ginkgo.By(fmt.Sprintf("Updating server pod %s to be selected by network policy %s.", podServer.Name, policy.Name))
+			updatePodLabel(f, f.Namespace, podServer.Name, "add", "/metadata/labels/isolated", nil)
+			testCannotConnect(f, f.Namespace, "client-a", service, allowedPort)
+		})
+
 		ginkgo.It("should enforce egress policy allowing traffic to a server in a different namespace based on PodSelector and NamespaceSelector [Feature:NetworkPolicy]", func() {
 			var nsBserviceA, nsBserviceB *v1.Service
 			var nsBpodServerA, nsBpodServerB *v1.Pod
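
Note (not part of the commit): the two new comments above rely on a Kubernetes behavior worth spelling out. With RestartPolicy=Never a failed one-shot container moves the pod to phase Failed, which is what the old checkNoConnectivity waited for; with RestartPolicy=OnFailure the kubelet restarts the container and the pod phase stays Running, so the failure is only visible in the container statuses. A minimal client-go sketch, assuming a caller-supplied clientset, namespace and pod name, of where that information lives:

package sketch

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// lastClientExitCode reports the most recent exit code of the pod's first
// container. With RestartPolicy=OnFailure the pod phase stays Running while
// the container crash-loops, so the exit code has to be read from the
// container status (current or last termination) rather than from the phase.
func lastClientExitCode(cs kubernetes.Interface, namespace, podName string) (int32, bool, error) {
	pod, err := cs.CoreV1().Pods(namespace).Get(context.TODO(), podName, metav1.GetOptions{})
	if err != nil {
		return 0, false, err
	}
	if len(pod.Status.ContainerStatuses) == 0 {
		return 0, false, fmt.Errorf("pod %q has no container statuses yet", podName)
	}
	st := pod.Status.ContainerStatuses[0]
	if t := st.State.Terminated; t != nil {
		return t.ExitCode, true, nil
	}
	if t := st.LastTerminationState.Terminated; t != nil {
		// The container already failed at least once and was restarted.
		return t.ExitCode, true, nil
	}
	return 0, false, nil // still running, nothing terminated yet
}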
@@ -1416,29 +1454,7 @@ func checkConnectivity(f *framework.Framework, ns *v1.Namespace, podClient *v1.P
 	framework.Logf("Waiting for %s to complete.", podClient.Name)
 	err = e2epod.WaitForPodSuccessInNamespace(f.ClientSet, podClient.Name, ns.Name)
 	if err != nil {
-		// Collect pod logs when we see a failure.
-		logs, logErr := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, podClient.Name, "client")
-		if logErr != nil {
-			framework.Failf("Error getting container logs: %s", logErr)
-		}
-
-		// Collect current NetworkPolicies applied in the test namespace.
-		policies, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{})
-		if err != nil {
-			framework.Logf("error getting current NetworkPolicies for %s namespace: %s", f.Namespace.Name, err)
-		}
-
-		// Collect the list of pods running in the test namespace.
-		podsInNS, err := e2epod.GetPodsInNamespace(f.ClientSet, f.Namespace.Name, map[string]string{})
-		if err != nil {
-			framework.Logf("error getting pods for %s namespace: %s", f.Namespace.Name, err)
-		}
-
-		pods := []string{}
-		for _, p := range podsInNS {
-			pods = append(pods, fmt.Sprintf("Pod: %s, Status: %s\n", p.Name, p.Status.String()))
-		}
-
+		pods, policies, logs := collectPodsAndNetworkPolicies(f, podClient)
 		framework.Failf("Pod %s should be able to connect to service %s, but was not able to connect.\nPod logs:\n%s\n\n Current NetworkPolicies:\n\t%v\n\n Pods:\n\t%v\n\n", podClient.Name, service.Name, logs, policies.Items, pods)
 
 		// Dump debug information for the test namespace.
@@ -1453,36 +1469,59 @@ func checkNoConnectivity(f *framework.Framework, ns *v1.Namespace, podClient *v1
 	// We expect an error here since it's a cannot connect test.
 	// Dump debug information if the error was nil.
 	if err == nil {
-		// Collect pod logs when we see a failure.
-		logs, logErr := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, podClient.Name, "client")
-		if logErr != nil {
-			framework.Failf("Error getting container logs: %s", logErr)
-		}
+		pods, policies, logs := collectPodsAndNetworkPolicies(f, podClient)
+		framework.Failf("Pod %s should not be able to connect to service %s, but was able to connect.\nPod logs:\n%s\n\n Current NetworkPolicies:\n\t%v\n\n Pods:\n\t %v\n\n", podClient.Name, service.Name, logs, policies.Items, pods)
 
-		// Collect current NetworkPolicies applied in the test namespace.
-		policies, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{})
-		if err != nil {
-			framework.Logf("error getting current NetworkPolicies for %s namespace: %s", f.Namespace.Name, err)
-		}
+		// Dump debug information for the test namespace.
+		framework.DumpDebugInfo(f.ClientSet, f.Namespace.Name)
+	}
+}
 
-		// Collect the list of pods running in the test namespace.
-		podsInNS, err := e2epod.GetPodsInNamespace(f.ClientSet, f.Namespace.Name, map[string]string{})
-		if err != nil {
-			framework.Logf("error getting pods for %s namespace: %s", f.Namespace.Name, err)
+func checkNoConnectivityByExitCode(f *framework.Framework, ns *v1.Namespace, podClient *v1.Pod, service *v1.Service) {
+	err := e2epod.WaitForPodCondition(f.ClientSet, ns.Name, podClient.Name, "terminated", framework.PodStartTimeout, func(pod *v1.Pod) (bool, error) {
+		statuses := pod.Status.ContainerStatuses
+		if len(statuses) == 0 || statuses[0].State.Terminated == nil {
+			return false, nil
 		}
-
-		pods := []string{}
-		for _, p := range podsInNS {
-			pods = append(pods, fmt.Sprintf("Pod: %s, Status: %s\n", p.Name, p.Status.String()))
+		if statuses[0].State.Terminated.ExitCode != 0 {
+			return true, fmt.Errorf("pod %q container exited with code: %d", podClient.Name, statuses[0].State.Terminated.ExitCode)
 		}
-
-		framework.Failf("Pod %s should not be able to connect to service %s, but was able to connect.\nPod logs:\n%s\n\n Current NetworkPolicies:\n\t%v\n\n Pods:\n\t %v\n\n", podClient.Name, service.Name, logs, policies.Items, pods)
+		return true, nil
+	})
+	// We expect an error here since it's a cannot connect test.
+	// Dump debug information if the error was nil.
+	if err == nil {
+		pods, policies, logs := collectPodsAndNetworkPolicies(f, podClient)
+		framework.Failf("Pod %s should not be able to connect to service %s, but was able to connect.\nPod logs:\n%s\n\n Current NetworkPolicies:\n\t%v\n\n Pods:\n\t%v\n\n", podClient.Name, service.Name, logs, policies.Items, pods)
 
 		// Dump debug information for the test namespace.
 		framework.DumpDebugInfo(f.ClientSet, f.Namespace.Name)
 	}
 }
 
+func collectPodsAndNetworkPolicies(f *framework.Framework, podClient *v1.Pod) ([]string, *networkingv1.NetworkPolicyList, string) {
+	// Collect pod logs when we see a failure.
+	logs, logErr := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, podClient.Name, "client")
+	if logErr != nil {
+		framework.Failf("Error getting container logs: %s", logErr)
+	}
+	// Collect current NetworkPolicies applied in the test namespace.
+	policies, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{})
+	if err != nil {
+		framework.Logf("error getting current NetworkPolicies for %s namespace: %s", f.Namespace.Name, err)
+	}
+	// Collect the list of pods running in the test namespace.
+	podsInNS, err := e2epod.GetPodsInNamespace(f.ClientSet, f.Namespace.Name, map[string]string{})
+	if err != nil {
+		framework.Logf("error getting pods for %s namespace: %s", f.Namespace.Name, err)
+	}
+	pods := []string{}
+	for _, p := range podsInNS {
+		pods = append(pods, fmt.Sprintf("Pod: %s, Status: %s\n", p.Name, p.Status.String()))
+	}
+	return pods, policies, logs
+}
+
 // Create a server pod with a listening container for each port in ports[].
 // Will also assign a pod label with key: "pod-name" and label set to the given podName for later use by the network
 // policy.
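
Note (not part of the commit): checkNoConnectivityByExitCode uses the standard Kubernetes condition-function convention: return (false, nil) to keep polling, (true, nil) to finish successfully, and (true, err) to stop and surface err. A non-zero client exit code therefore comes back from e2epod.WaitForPodCondition as an error, which is the expected outcome for a cannot-connect test, while err == nil means the client connected and triggers the diagnostic dump. A standalone sketch of the same convention with apimachinery's wait.PollImmediate; the interval, timeout and getExitCode callback are assumptions, not framework APIs:

package sketch

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// waitForNonZeroExit polls until the client container has terminated, then
// turns a non-zero exit code into an error, mirroring how the new helper
// reports a blocked connection.
func waitForNonZeroExit(getExitCode func() (code int32, terminated bool)) error {
	return wait.PollImmediate(2*time.Second, 1*time.Minute, func() (bool, error) {
		code, terminated := getExitCode()
		if !terminated {
			return false, nil // still running or restarting: keep polling
		}
		if code != 0 {
			// Stop polling and surface the failure through the returned error.
			return true, fmt.Errorf("client container exited with code %d", code)
		}
		return true, nil // exit code 0: the connection went through
	})
}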
@@ -1580,6 +1619,12 @@ func cleanupServerPodAndService(f *framework.Framework, pod *v1.Pod, service *v1
 // This client will attempt a one-shot connection, then die, without restarting the pod.
 // Test can then be asserted based on whether the pod quit with an error or not.
 func createNetworkClientPod(f *framework.Framework, namespace *v1.Namespace, podName string, targetService *v1.Service, targetPort int) *v1.Pod {
+	return createNetworkClientPodWithRestartPolicy(f, namespace, podName, targetService, targetPort, v1.RestartPolicyNever)
+}
+
+// Create a client pod which will attempt a netcat to the provided service, on the specified port.
+// It is similar to createNetworkClientPod but supports specifying RestartPolicy.
+func createNetworkClientPodWithRestartPolicy(f *framework.Framework, namespace *v1.Namespace, podName string, targetService *v1.Service, targetPort int, restartPolicy v1.RestartPolicy) *v1.Pod {
 	pod, err := f.ClientSet.CoreV1().Pods(namespace.Name).Create(context.TODO(), &v1.Pod{
 		ObjectMeta: metav1.ObjectMeta{
 			GenerateName: podName + "-",
@@ -1588,7 +1633,7 @@ func createNetworkClientPod(f *framework.Framework, namespace *v1.Namespace, pod
 			},
 		},
 		Spec: v1.PodSpec{
-			RestartPolicy: v1.RestartPolicyNever,
+			RestartPolicy: restartPolicy,
 			Containers: []v1.Container{
 				{
 					Name: "client",
@@ -1608,8 +1653,8 @@
 	return pod
 }
 
-// Patch client pod with a map value
-func updateNetworkClientPodLabel(f *framework.Framework, namespace *v1.Namespace, podName string, patchOperation string, patchPath string, patchValue map[string]string) *v1.Pod {
+// Patch pod with a map value
+func updatePodLabel(f *framework.Framework, namespace *v1.Namespace, podName string, patchOperation string, patchPath string, patchValue map[string]string) *v1.Pod {
 	type patchMapValue struct {
 		Op string `json:"op"`
 		Path string `json:"path"`
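
Note (not part of the commit): the deny-ingress test calls updatePodLabel(f, f.Namespace, podServer.Name, "add", "/metadata/labels/isolated", nil), i.e. it applies an RFC 6902 JSON Patch that adds the "isolated" label so the server pod starts matching the policy's Exists selector. The struct the helper marshals is cut off in this diff, so the sketch below applies an equivalent raw patch directly with client-go; the label value and the clientset, namespace and pod parameters are illustrative assumptions (the policy only requires the key to exist):

package sketch

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
)

// markPodIsolated adds the "isolated" label to a pod with a JSON Patch, which
// is enough for the deny-ingress policy's Exists match expression to select it.
func markPodIsolated(cs kubernetes.Interface, namespace, podName string) error {
	patch := []byte(`[{"op": "add", "path": "/metadata/labels/isolated", "value": "true"}]`)
	_, err := cs.CoreV1().Pods(namespace).Patch(context.TODO(), podName, types.JSONPatchType, patch, metav1.PatchOptions{})
	return err
}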
