Skip to content
Merged
Show file tree
Hide file tree
Changes from 5 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
96 changes: 96 additions & 0 deletions test/integration/lrp/lrp_fqdn_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,96 @@
//go:build lrp

package lrp

import (
"context"
"testing"

"github.com/Azure/azure-container-networking/test/internal/kubernetes"
ciliumClientset "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned"
"github.com/stretchr/testify/require"
)

var (
fqdnCNPPath = ciliumManifestsDir + "fqdn-cnp.yaml"
enableFQDNFlag = "enable-l7-proxy"
)

// TestLRPFQDN tests if the local redirect policy in a cilium cluster is functioning with a
// FQDN Cilium Network Policy. As such, enable-l7-proxy should be enabled in the config.
// The test assumes the current kubeconfig points to a cluster with cilium, cns,
// and kube-dns already installed. The lrp feature flag should also be enabled in the cilium config.
// Resources created are automatically cleaned up.
// From the lrp folder, run: go test ./ -v -tags "lrp" -run ^TestLRPFQDN$
func TestLRPFQDN(t *testing.T) {
	ctx := context.Background()

	clientPod, cleanup := setupLRP(t, ctx)
	defer cleanup()
	require.NotNil(t, clientPod)

	clientset := kubernetes.MustGetClientset()
	restConfig := kubernetes.MustGetRestConfig()
	ciliumClient, err := ciliumClientset.NewForConfig(restConfig)
	require.NoError(t, err)

	// the FQDN policy relies on cilium's L7 proxy, so fail fast if the flag is off
	ciliumConfig, err := kubernetes.GetConfigmap(ctx, clientset, kubeSystemNamespace, ciliumConfigmapName)
	require.NoError(t, err)
	require.Equal(t, "true", ciliumConfig.Data[enableFQDNFlag], "enable-l7-proxy not set to true in cilium-config")

	// apply the FQDN CNP for the duration of the test
	_, cleanupCNP := kubernetes.MustSetupCNP(ctx, ciliumClient, fqdnCNPPath)
	defer cleanupCNP()

	testCases := []struct {
		name                   string
		command                []string
		expectedMsgContains    string
		expectedErrMsgContains string
		countIncreases         bool
	}{
		{
			name:                "nslookup google succeeds",
			command:             []string{"nslookup", "www.google.com", "10.0.0.10"},
			expectedMsgContains: "Server:",
			countIncreases:      true,
		},
		{
			name:                   "wget google succeeds",
			command:                []string{"wget", "-O", "index.html", "www.google.com", "--timeout=5"},
			expectedErrMsgContains: "saved",
			countIncreases:         true,
		},
		{
			name:                "nslookup bing succeeds",
			command:             []string{"nslookup", "www.bing.com", "10.0.0.10"},
			expectedMsgContains: "Server:",
			countIncreases:      true,
		},
		{
			name:                   "wget bing fails but dns succeeds",
			command:                []string{"wget", "-O", "index.html", "www.bing.com", "--timeout=5"},
			expectedErrMsgContains: "timed out",
			countIncreases:         true,
		},
		{
			name:                "nslookup example fails",
			command:             []string{"nslookup", "www.example.com", "10.0.0.10"},
			expectedMsgContains: "REFUSED",
			countIncreases:      false,
		},
		{
			// won't be able to nslookup, let alone query the website
			name:                   "wget example fails",
			command:                []string{"wget", "-O", "index.html", "www.example.com", "--timeout=5"},
			expectedErrMsgContains: "bad address",
			countIncreases:         false,
		},
	}
	for _, tc := range testCases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			testLRPCase(t, ctx, *clientPod, tc.command, tc.expectedMsgContains, tc.expectedErrMsgContains, tc.countIncreases)
		})
	}
}
91 changes: 63 additions & 28 deletions test/integration/lrp/lrp_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@ import (
"github.com/pkg/errors"
"github.com/stretchr/testify/require"
"golang.org/x/exp/rand"
v1 "k8s.io/api/core/v1"
)

const (
Expand Down Expand Up @@ -46,15 +47,22 @@ var (
clientPath = ciliumManifestsDir + "client-ds.yaml"
)

// TestLRP tests if the local redirect policy in a cilium cluster is functioning
// The test assumes the current kubeconfig points to a cluster with cilium (1.16+), cns,
// and kube-dns already installed. The lrp feature flag should be enabled in the cilium config
// Resources created are automatically cleaned up
// From the lrp folder, run: go test ./lrp_test.go -v -tags "lrp" -run ^TestLRP$
func TestLRP(t *testing.T) {
config := kubernetes.MustGetRestConfig()
ctx := context.Background()
// setupLRP deploys the node-local-dns prerequisites, the local redirect policy,
// and the client daemonset, then port-forwards to a node-local dns pod on the
// selected node. It returns one client pod on that node together with a cleanup
// function that tears down everything created here in reverse creation order.
// If setup fails partway (require.* stops the test and runs defers), the
// deferred block below performs the cleanup immediately.
func setupLRP(t *testing.T, ctx context.Context) (*v1.Pod, func()) {
// cleanup functions are stacked and popped LIFO so resources are removed in
// reverse creation order
var cleanUpFns []func()
success := false
cleanupFn := func() {
for len(cleanUpFns) > 0 {
cleanUpFns[len(cleanUpFns)-1]()
cleanUpFns = cleanUpFns[:len(cleanUpFns)-1]
}
}
// only auto-clean on failure; on success the caller owns cleanupFn
defer func() {
if !success {
cleanupFn()
}
}()

config := kubernetes.MustGetRestConfig()
cs := kubernetes.MustGetClientset()

ciliumCS, err := ciliumClientset.NewForConfig(config)
Expand Down Expand Up @@ -90,13 +98,13 @@ func TestLRP(t *testing.T) {

// deploy node local dns prereqs and pods
_, cleanupConfigMap := kubernetes.MustSetupConfigMap(ctx, cs, nodeLocalDNSConfigMapPath)
defer cleanupConfigMap()
cleanUpFns = append(cleanUpFns, cleanupConfigMap)
_, cleanupServiceAccount := kubernetes.MustSetupServiceAccount(ctx, cs, nodeLocalDNSServiceAccountPath)
defer cleanupServiceAccount()
cleanUpFns = append(cleanUpFns, cleanupServiceAccount)
_, cleanupService := kubernetes.MustSetupService(ctx, cs, nodeLocalDNSServicePath)
defer cleanupService()
cleanUpFns = append(cleanUpFns, cleanupService)
nodeLocalDNSDS, cleanupNodeLocalDNS := kubernetes.MustSetupDaemonset(ctx, cs, tempNodeLocalDNSDaemonsetPath)
defer cleanupNodeLocalDNS()
cleanUpFns = append(cleanUpFns, cleanupNodeLocalDNS)
err = kubernetes.WaitForPodsRunning(ctx, cs, nodeLocalDNSDS.Namespace, nodeLocalDNSLabelSelector)
require.NoError(t, err)
// select a local dns pod after they start running
Expand All @@ -106,19 +114,19 @@

// deploy lrp
_, cleanupLRP := kubernetes.MustSetupLRP(ctx, ciliumCS, lrpPath)
defer cleanupLRP()
cleanUpFns = append(cleanUpFns, cleanupLRP)

// create client pods
clientDS, cleanupClient := kubernetes.MustSetupDaemonset(ctx, cs, clientPath)
defer cleanupClient()
cleanUpFns = append(cleanUpFns, cleanupClient)
err = kubernetes.WaitForPodsRunning(ctx, cs, clientDS.Namespace, clientLabelSelector)
require.NoError(t, err)
// select a client pod after they start running
clientPods, err := kubernetes.GetPodsByNode(ctx, cs, clientDS.Namespace, clientLabelSelector, selectedNode)
require.NoError(t, err)
selectedClientPod := TakeOne(clientPods.Items).Name
// return the whole pod (not just the name) so callers can use its namespace too
selectedClientPod := TakeOne(clientPods.Items)

t.Logf("Selected node: %s, node local dns pod: %s, client pod: %s\n", selectedNode, selectedLocalDNSPod, selectedClientPod)
t.Logf("Selected node: %s, node local dns pod: %s, client pod: %s\n", selectedNode, selectedLocalDNSPod, selectedClientPod.Name)

// port forward to local dns pod on same node (separate thread)
pf, err := k8s.NewPortForwarder(config, k8s.PortForwardingOpts{
Expand All @@ -130,17 +138,25 @@
require.NoError(t, err)
// port-forward lifetime is bounded by its own timeout context, independent of ctx
pctx := context.Background()
portForwardCtx, cancel := context.WithTimeout(pctx, (retryAttempts+1)*retryDelay)
defer cancel()
cleanUpFns = append(cleanUpFns, cancel)

err = defaultRetrier.Do(portForwardCtx, func() error {
t.Logf("attempting port forward to a pod with label %s, in namespace %s...", nodeLocalDNSLabelSelector, nodeLocalDNSDS.Namespace)
return errors.Wrap(pf.Forward(portForwardCtx), "could not start port forward")
})
require.NoError(t, err, "could not start port forward within %d", (retryAttempts+1)*retryDelay)
defer pf.Stop()
cleanUpFns = append(cleanUpFns, pf.Stop)

t.Log("started port forward")

// mark success so the deferred cleanup above is skipped; caller runs cleanupFn
success = true
return &selectedClientPod, cleanupFn
}

// testLRPCase runs clientCmd inside clientPod, asserts that stdout contains
// expectResponse and stderr contains expectErrMsg, then compares the coredns
// request-count metric before and after: it must strictly increase when
// countShouldIncrease is true and stay equal otherwise.
func testLRPCase(t *testing.T, ctx context.Context, clientPod v1.Pod, clientCmd []string, expectResponse, expectErrMsg string, countShouldIncrease bool) {
config := kubernetes.MustGetRestConfig()
cs := kubernetes.MustGetClientset()

// labels for target lrp metric
metricLabels := map[string]string{
"family": "1",
Expand All @@ -153,24 +169,43 @@
// snapshot the metric before running the command
beforeMetric, err := prometheus.GetMetric(promAddress, coreDNSRequestCountTotal, metricLabels)
require.NoError(t, err)

t.Log("calling nslookup from client")
// nslookup to 10.0.0.10 (coredns)
val, err := kubernetes.ExecCmdOnPod(ctx, cs, clientDS.Namespace, selectedClientPod, clientContainer, []string{
"nslookup", "google.com", "10.0.0.10",
}, config)
require.NoError(t, err, string(val))
// can connect
require.Contains(t, string(val), "Server:")
t.Log("calling command from client")

// NOTE(review): err returned by ExecCmdOnPodOnce is never checked here, so a
// transport/exec failure only surfaces indirectly via the Contains assertions
// below — consider asserting on err explicitly.
val, errMsg, err := kubernetes.ExecCmdOnPodOnce(ctx, cs, clientPod.Namespace, clientPod.Name, clientContainer, clientCmd, config)

require.Contains(t, string(val), expectResponse)
require.Contains(t, string(errMsg), expectErrMsg)

// in case there is time to propagate
time.Sleep(1 * time.Second)
time.Sleep(500 * time.Millisecond)

// curl again and see count increases
afterMetric, err := prometheus.GetMetric(promAddress, coreDNSRequestCountTotal, metricLabels)
require.NoError(t, err)

// count should go up
require.Greater(t, afterMetric.GetCounter().GetValue(), beforeMetric.GetCounter().GetValue(), "dns metric count did not increase after nslookup")
if countShouldIncrease {
require.Greater(t, afterMetric.GetCounter().GetValue(), beforeMetric.GetCounter().GetValue(), "dns metric count did not increase after command")
} else {
require.Equal(t, afterMetric.GetCounter().GetValue(), beforeMetric.GetCounter().GetValue(), "dns metric count increased after command")
}
}

// TestLRP tests if the local redirect policy in a cilium cluster is functioning.
// The test assumes the current kubeconfig points to a cluster with cilium (1.16+), cns,
// and kube-dns already installed. The lrp feature flag should be enabled in the cilium config.
// Resources created are automatically cleaned up.
// From the lrp folder, run: go test ./ -v -tags "lrp" -run ^TestLRP$
func TestLRP(t *testing.T) {
	ctx := context.Background()

	clientPod, cleanup := setupLRP(t, ctx)
	defer cleanup()
	require.NotNil(t, clientPod)

	// a plain nslookup against coredns should succeed and bump the dns metric
	lookupCmd := []string{"nslookup", "google.com", "10.0.0.10"}
	testLRPCase(t, ctx, *clientPod, lookupCmd, "Server:", "", true)
}

// TakeOne takes one item from the slice randomly; if empty, it returns the empty value for the type
Expand Down
24 changes: 24 additions & 0 deletions test/integration/manifests/cilium/lrp/fqdn-cnp.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
# CiliumNetworkPolicy used by TestLRPFQDN: constrains egress from the test
# client pods (label lrp-test=true) so that DNS on 53/UDP is only allowed
# toward node-local-dns in kube-system, and only for the name patterns below.
apiVersion: "cilium.io/v2"
kind: CiliumNetworkPolicy
metadata:
name: "to-fqdn"
namespace: "default"
spec:
endpointSelector:
matchLabels:
lrp-test: "true"
egress:
- toEndpoints:
- matchLabels:
"k8s:io.kubernetes.pod.namespace": kube-system
"k8s:k8s-app": node-local-dns
toPorts:
- ports:
- port: "53"
protocol: UDP
rules:
# DNS queries are permitted (and inspected) only for these patterns;
# other lookups are refused (see the "nslookup example fails" test case)
dns:
- matchPattern: "*.google.com"
- matchPattern: "*.bing.com"
# egress to resolved addresses is only allowed for google FQDNs, so bing
# resolves but connections to it time out (see "wget bing fails" test case)
- toFQDNs:
- matchPattern: "*.google.com"
90 changes: 55 additions & 35 deletions test/internal/kubernetes/utils.go
Original file line number Diff line number Diff line change
Expand Up @@ -258,6 +258,15 @@ func MustSetupLRP(ctx context.Context, clientset *cilium.Clientset, lrpPath stri
}
}

// MustSetupCNP parses the CiliumNetworkPolicy manifest at cnpPath, creates it in
// its declared namespace, and returns the policy together with a cleanup
// function that deletes it. Panics/fails via the Must* helpers on error.
func MustSetupCNP(ctx context.Context, clientset *cilium.Clientset, cnpPath string) (ciliumv2.CiliumNetworkPolicy, func()) { // nolint
	policy := mustParseCNP(cnpPath)
	cnpClient := clientset.CiliumV2().CiliumNetworkPolicies(policy.Namespace)
	mustCreateCiliumNetworkPolicy(ctx, cnpClient, policy)
	cleanup := func() {
		MustDeleteCiliumNetworkPolicy(ctx, cnpClient, policy)
	}
	return policy, cleanup
}

func Int32ToPtr(i int32) *int32 { return &i }

func WaitForPodsRunning(ctx context.Context, clientset *kubernetes.Clientset, namespace, labelselector string) error {
Expand Down Expand Up @@ -482,47 +491,58 @@ func writeToFile(dir, fileName, str string) error {
// ExecCmdOnPod runs cmd in containerName of podName, retrying failed attempts
// (unlike ExecCmdOnPodOnce), and returns the command's standard output.
func ExecCmdOnPod(ctx context.Context, clientset *kubernetes.Clientset, namespace, podName, containerName string, cmd []string, config *rest.Config) ([]byte, error) {
	var lastStdout []byte
	// each attempt delegates to the single-shot exec helper and records stdout
	attempt := func() error {
		out, _, execErr := ExecCmdOnPodOnce(ctx, clientset, namespace, podName, containerName, cmd, config)
		lastStdout = out
		return execErr
	}
	r := retry.Retrier{Attempts: ShortRetryAttempts, Delay: RetryDelay}
	err := r.Do(ctx, attempt)
	return lastStdout, errors.Wrapf(err, "could not execute the cmd %s on %s", cmd, podName)
}

// ExecCmdOnPodOnce runs a command on the specified pod and returns its standard output, standard error output, and error in that order
// The command does not retry when the command fails (ex: due to timeout), unlike ExecCmdOnPod
func ExecCmdOnPodOnce(ctx context.Context, clientset *kubernetes.Clientset, namespace, podName, containerName string, cmd []string, config *rest.Config) ([]byte, []byte, error) { // nolint
	// build the exec subresource request for the target container
	execReq := clientset.CoreV1().RESTClient().Post().
		Resource("pods").
		Name(podName).
		Namespace(namespace).
		SubResource("exec").
		VersionedParams(&corev1.PodExecOptions{
			Command:   cmd,
			Container: containerName,
			Stdin:     false,
			Stdout:    true,
			Stderr:    true,
			TTY:       false,
		}, scheme.ParameterCodec)

	executor, err := remotecommand.NewSPDYExecutor(config, "POST", execReq.URL())
	if err != nil {
		return nil, nil, errors.Wrapf(err, "error in creating executor for req %s", execReq.URL())
	}

	// stream the command's output into local buffers
	var outBuf, errBuf bytes.Buffer
	streamErr := executor.StreamWithContext(ctx, remotecommand.StreamOptions{
		Stdin:  nil,
		Stdout: &outBuf,
		Stderr: &errBuf,
		Tty:    false,
	})

	stdoutBytes := outBuf.Bytes()
	stderrBytes := errBuf.Bytes()

	if streamErr != nil {
		// captured output is still returned alongside the error for diagnostics
		log.Printf("Error: %v had error %v from command - %v", podName, streamErr, cmd)
		return stdoutBytes, stderrBytes, errors.Wrapf(streamErr, "error in executing command %s", cmd)
	}
	if len(stdoutBytes) == 0 {
		log.Printf("Warning: %v had 0 bytes returned from command - %v", podName, cmd)
	}
	return stdoutBytes, stderrBytes, nil
}

func NamespaceExists(ctx context.Context, clientset *kubernetes.Clientset, namespace string) (bool, error) {
_, err := clientset.CoreV1().Namespaces().Get(ctx, namespace, metav1.GetOptions{})
if err != nil {
Expand Down
Loading
Loading