//go:build connection

package connection

import (
	"context"
	"flag"
	"net"
	"os"
	"testing"
	"time"

	k8s "github.com/Azure/azure-container-networking/test/integration"
	"github.com/Azure/azure-container-networking/test/integration/goldpinger"
	k8sutils "github.com/Azure/azure-container-networking/test/internal/k8sutils"
	"github.com/Azure/azure-container-networking/test/internal/retry"
	"github.com/pkg/errors"

	appsv1 "k8s.io/api/apps/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

const (
	podLabelKey                = "app"
	podCount                   = 2
	nodepoolKey                = "agentpool"
	LinuxDeployIPv4            = "../manifests/datapath/linux-deployment.yaml"
	LinuxDeployIPv6            = "../manifests/datapath/linux-deployment-ipv6.yaml"
	maxRetryDelaySeconds       = 10
	defaultTimeoutSeconds      = 120
	defaultRetryDelaySeconds   = 1
	goldpingerRetryCount       = 24
	goldpingerDelayTimeSeconds = 5
	gpFolder                   = "../manifests/goldpinger"
	gpClusterRolePath          = gpFolder + "/cluster-role.yaml"
	gpClusterRoleBindingPath   = gpFolder + "/cluster-role-binding.yaml"
	gpServiceAccountPath       = gpFolder + "/service-account.yaml"
	gpDaemonset                = gpFolder + "/daemonset.yaml"
	gpDaemonsetIPv6            = gpFolder + "/daemonset-ipv6.yaml"
	gpDeployment               = gpFolder + "/deployment.yaml"
)

var (
	podPrefix        = flag.String("podName", "goldpinger", "Prefix for test pods")
	podNamespace     = flag.String("namespace", "default", "Namespace for test pods")
	nodepoolSelector = flag.String("nodepoolSelector", "nodepool1", "Provides the nodepool as a Linux node selector for pods")
	// TODO: add flag to support dual-NIC scenario
	isDualStack    = flag.Bool("isDualStack", false, "whether the cluster supports the dualstack scenario")
	defaultRetrier = retry.Retrier{
		Attempts: 10,
		Delay:    defaultRetryDelaySeconds * time.Second,
	}
)

/*
This test assumes that you have the current credentials loaded in your default kubeconfig for a
k8s cluster with a Linux nodepool consisting of at least 2 Linux nodes.
*** The expected nodepool name is nodepool1; if your nodepool has a different name, override nodepoolSelector with:
	-nodepoolSelector="yournodepoolname"

To run the test, use one of the following commands:
go test -count=1 test/integration/datapath/datapath_linux_test.go -timeout 3m -tags=connection,integration -run ^TestDatapathLinux$
  or
go test -count=1 test/integration/datapath/datapath_linux_test.go -timeout 3m -tags=connection,integration -run ^TestDatapathLinux$ -podName=acnpod -nodepoolSelector=aks-pool1
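  or, to exercise the dualstack checks on a dualstack-enabled cluster (the -isDualStack flag is defined by this test):
go test -count=1 test/integration/datapath/datapath_linux_test.go -timeout 3m -tags=connection,integration -run ^TestDatapathLinux$ -isDualStack=true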

This test checks pod-to-pod, pod-to-node, and pod-to-Internet connectivity.

The timeout context is controlled by the -timeout flag.
*/
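
// setupLinuxEnvironment deploys the goldpinger daemonset and test deployment
// (IPv4 or dualstack variants), creates the RBAC objects they need, and waits
// until at least two test pods are running on every node in the selected nodepool.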
func setupLinuxEnvironment(t *testing.T) {
	ctx := context.Background()

	t.Log("Create Clientset")
	clientset, err := k8sutils.MustGetClientset()
	if err != nil {
		t.Fatalf("could not get k8s clientset: %v", err)
	}

	t.Log("Create Label Selectors")
	podLabelSelector := k8sutils.CreateLabelSelector(podLabelKey, podPrefix)
	nodeLabelSelector := k8sutils.CreateLabelSelector(nodepoolKey, nodepoolSelector)

	t.Log("Get Nodes")
	nodes, err := k8sutils.GetNodeListByLabelSelector(ctx, clientset, nodeLabelSelector)
	if err != nil {
		t.Fatalf("could not get k8s node list: %v", err)
	}

	t.Log("Creating Linux pods through deployment")

	// run the goldpinger ipv4 and ipv6 test cases separately
	var daemonset appsv1.DaemonSet
	var deployment appsv1.Deployment

	if *isDualStack {
		deployment, err = k8sutils.MustParseDeployment(LinuxDeployIPv6)
		if err != nil {
			t.Fatal(err)
		}

		daemonset, err = k8sutils.MustParseDaemonSet(gpDaemonsetIPv6)
		if err != nil {
			t.Fatal(err)
		}
	} else {
		deployment, err = k8sutils.MustParseDeployment(LinuxDeployIPv4)
		if err != nil {
			t.Fatal(err)
		}

		daemonset, err = k8sutils.MustParseDaemonSet(gpDaemonset)
		if err != nil {
			t.Fatal(err)
		}
	}

	// set up the common RBAC resources: ClusterRole, ClusterRoleBinding, ServiceAccount
	rbacSetupFn, err := k8sutils.MustSetUpClusterRBAC(ctx, clientset, gpClusterRolePath, gpClusterRoleBindingPath, gpServiceAccountPath)
	if err != nil {
		t.Log(os.Getwd())
		t.Fatal(err)
	}

	// Fields for overwriting the parsed deployment manifest.
	// The flag defaults leave the manifest values unchanged.
	deployment.Spec.Selector.MatchLabels[podLabelKey] = *podPrefix
	deployment.Spec.Template.ObjectMeta.Labels[podLabelKey] = *podPrefix
	deployment.Spec.Template.Spec.NodeSelector[nodepoolKey] = *nodepoolSelector
	deployment.Name = *podPrefix
	deployment.Namespace = *podNamespace
	daemonset.Namespace = *podNamespace

	deploymentsClient := clientset.AppsV1().Deployments(*podNamespace)
	err = k8sutils.MustCreateDeployment(ctx, deploymentsClient, deployment)
	if err != nil {
		t.Fatal(err)
	}

	daemonsetClient := clientset.AppsV1().DaemonSets(daemonset.Namespace)
	err = k8sutils.MustCreateDaemonset(ctx, daemonsetClient, daemonset)
	if err != nil {
		t.Fatal(err)
	}

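	// Register cleanup so the RBAC resources, deployment, and daemonset are
	// removed when the test exits, regardless of the outcome.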
	t.Cleanup(func() {
		t.Log("cleaning up resources")
		rbacSetupFn()

		// tolerate objects that are already gone
		if err := deploymentsClient.Delete(ctx, deployment.Name, metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) {
			t.Log(err)
		}

		if err := daemonsetClient.Delete(ctx, daemonset.Name, metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) {
			t.Log(err)
		}
	})

	t.Log("Waiting for pods to reach the Running state")
	err = k8sutils.WaitForPodsRunning(ctx, clientset, *podNamespace, podLabelSelector)
	if err != nil {
		t.Fatalf("Pods did not reach the Running state: %+v", err)
	}

	if *isDualStack {
		t.Log("Successfully created customer dualstack Linux pods")
	} else {
		t.Log("Successfully created customer singlestack Linux pods")
	}

	t.Log("Checking Linux test environment")
	for _, node := range nodes.Items {
		pods, err := k8sutils.GetPodsByNode(ctx, clientset, *podNamespace, podLabelSelector, node.Name)
		if err != nil {
			t.Fatalf("could not get pods on node %s: %v", node.Name, err)
		}
		if len(pods.Items) <= 1 {
			t.Fatalf("expected at least 2 test pods on node %s, found %d", node.Name, len(pods.Items))
		}
	}

	t.Log("Linux test environment ready")
}
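
// TestDatapathLinux port-forwards to a goldpinger pod and drives its API to
// verify that every test pod can ping every other pod (and, in dualstack mode,
// that each pod has both an IPv4 and an IPv6 address).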
func TestDatapathLinux(t *testing.T) {
	ctx := context.Background()

	t.Log("Get REST config")
	restConfig := k8sutils.MustGetRestConfig(t)

	t.Log("Create Clientset")
	clientset, err := k8sutils.MustGetClientset()
	if err != nil {
		t.Fatalf("could not get k8s clientset: %v", err)
	}

	setupLinuxEnvironment(t)
	podLabelSelector := k8sutils.CreateLabelSelector(podLabelKey, podPrefix)

| 200 | + t.Run("Linux ping tests", func(t *testing.T) { |
| 201 | + // Check goldpinger health |
| 202 | + t.Run("all pods have IPs assigned", func(t *testing.T) { |
| 203 | + err := k8sutils.WaitForPodsRunning(ctx, clientset, *podNamespace, podLabelSelector) |
| 204 | + if err != nil { |
| 205 | + t.Fatalf("Pods are not in running state due to %+v", err) |
| 206 | + } |
| 207 | + t.Log("all pods have been allocated IPs") |
| 208 | + }) |

		if *isDualStack {
			t.Run("Linux dualstack overlay tests", func(t *testing.T) {
				t.Run("test dualstack overlay", func(t *testing.T) {
					podsClient := clientset.CoreV1().Pods(*podNamespace)

					checkPodIPsFn := func() error {
						podList, err := podsClient.List(ctx, metav1.ListOptions{LabelSelector: podLabelSelector})
						if err != nil {
							return err
						}

						for _, pod := range podList.Items {
							podIPs := pod.Status.PodIPs
							if len(podIPs) < 2 {
								return errors.New("pod has only one IP assigned")
							}
							// the first IP must be IPv4 and the second IPv6; note that
							// To16 is non-nil for IPv4 addresses too, so test with To4
							if net.ParseIP(podIPs[0].IP).To4() == nil {
								return errors.New("pod does not have an ipv4 address")
							}
							if ip := net.ParseIP(podIPs[1].IP); ip == nil || ip.To4() != nil {
								return errors.New("pod does not have an ipv6 address")
							}
						}
						return nil
					}
					err := defaultRetrier.Do(ctx, checkPodIPsFn)
					if err != nil {
						t.Fatalf("dualstack overlay pod properties check failed: %v", err)
					}

					t.Log("all dualstack linux pod properties have been verified")
				})
			})
		}

		t.Run("all linux pods can ping each other", func(t *testing.T) {
			clusterCheckCtx, cancel := context.WithTimeout(ctx, 3*time.Minute)
			defer cancel()
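
			// Forward a local port to one of the goldpinger pods so the test
			// can reach its HTTP API (serving on 8080) from outside the cluster.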
			pfOpts := k8s.PortForwardingOpts{
				Namespace:     *podNamespace,
				LabelSelector: podLabelSelector,
				LocalPort:     9090,
				DestPort:      8080,
			}

			pf, err := k8s.NewPortForwarder(restConfig, t, pfOpts)
			if err != nil {
				t.Fatal(err)
			}

			portForwardCtx, cancel := context.WithTimeout(ctx, defaultTimeoutSeconds*time.Second)
			defer cancel()
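
			// Port-forwarding can fail transiently while the target pod is
			// still coming up, so retry it before giving up.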
			portForwardFn := func() error {
				err := pf.Forward(portForwardCtx)
				if err != nil {
					t.Logf("unable to start port forward: %v", err)
					return err
				}
				return nil
			}

			if err := defaultRetrier.Do(portForwardCtx, portForwardFn); err != nil {
				t.Fatalf("could not start port forward within %d seconds: %v", defaultTimeoutSeconds, err)
			}
			defer pf.Stop()

			gpClient := goldpinger.Client{Host: pf.Address()}
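			// clusterCheckFn succeeds only when goldpinger reports that every
			// pod can ping every other pod in the cluster.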
			clusterCheckFn := func() error {
				clusterState, err := gpClient.CheckAll(clusterCheckCtx)
				if err != nil {
					return err
				}
				stats := goldpinger.ClusterStats(clusterState)
				stats.PrintStats()
				if stats.AllPingsHealthy() {
					return nil
				}

				return errors.New("not all pings are healthy")
			}
			retrier := retry.Retrier{Attempts: goldpingerRetryCount, Delay: goldpingerDelayTimeSeconds * time.Second}
			if err := retrier.Do(clusterCheckCtx, clusterCheckFn); err != nil {
				t.Fatalf("goldpinger pod network health did not reach a healthy state after %d seconds: %v", goldpingerRetryCount*goldpingerDelayTimeSeconds, err)
			}

			t.Log("all pings successful!")
		})
	})
}