@@ -324,7 +324,11 @@ install_kubevirt() {
324324 # vX.Y.Z - install specific stable (i.e v1.3.1)
325325 # nightly - install newest nightly
326326 # nightly tag - install specific nightly (i.e 20240910)
327- KUBEVIRT_VERSION=${KUBEVIRT_VERSION:-"stable"}
327+ # KUBEVIRT_VERSION=${KUBEVIRT_VERSION:-"stable"}
328+
329+ # FIXME: kubevirt v1.5.0 is breaking live migration tcp connections in
330+ # e2e tests, so pin to v1.4.0 which is known to work
331+ KUBEVIRT_VERSION=${KUBEVIRT_VERSION:-"v1.4.0"}
328332
329333 for node in $(kubectl get node --no-headers -o custom-columns=":metadata.name"); do
330334 $OCI_BIN exec -t $node bash -c "echo 'fs.inotify.max_user_watches=1048576' >> /etc/sysctl.conf"
@@ -409,7 +413,7 @@ install_multus() {
409413
410414install_mpolicy_crd() {
411415 echo "Installing multi-network-policy CRD ..."
412- mpolicy_manifest="https://raw.githubusercontent.com/k8snetworkplumbingwg/multi-networkpolicy/master/scheme.yml"
416+ mpolicy_manifest="https://raw.githubusercontent.com/k8snetworkplumbingwg/multi-networkpolicy/refs/tags/v1.0.1/scheme.yml"
413417 run_kubectl apply -f "$mpolicy_manifest"
414418}
415419
@@ -648,14 +652,23 @@ get_kubevirt_release_url() {
648652 echo "$kubevirt_release_url"
649653}
650654
651- readonly FRR_K8S_VERSION=v0.0.14
655+ readonly FRR_K8S_VERSION=v0.0.17
652656readonly FRR_TMP_DIR=$(mktemp -d -u)
653657
654658clone_frr() {
655659 [ -d "$FRR_TMP_DIR" ] || {
656660 mkdir -p "$FRR_TMP_DIR" && trap 'rm -rf $FRR_TMP_DIR' EXIT
657661 pushd "$FRR_TMP_DIR" || exit 1
658662 git clone --depth 1 --branch $FRR_K8S_VERSION https://github.com/metallb/frr-k8s
663+
664+ # Download the patches
665+ curl -Ls https://github.com/jcaamano/frr-k8s/archive/refs/heads/ovnk-bgp.tar.gz | tar xzvf - frr-k8s-ovnk-bgp/patches --strip-components 1
666+
667+ # Change into the cloned repo directory before applying patches
668+ pushd frr-k8s
669+ git apply ../patches/*
670+ popd
671+
659672 popd || exit 1
660673 }
661674}
@@ -672,19 +685,86 @@ deploy_frr_external_container() {
672685 # can peer with acting as BGP (reflector) external gateway
673686 pushd "${FRR_TMP_DIR}"/frr-k8s/hack/demo || exit 1
674687 # modify config template to configure neighbors as route reflector clients
688+ # First check if IPv4 network already exists
689+ grep -q 'network '"${BGP_SERVER_NET_SUBNET_IPV4}" frr/frr.conf.tmpl || \
690+ sed -i '/address-family ipv4 unicast/a \ \ network '"${BGP_SERVER_NET_SUBNET_IPV4}"'' frr/frr.conf.tmpl
691+
692+ # Add route reflector client config
675693 sed -i '/remote-as 64512/a \ neighbor {{ . }} route-reflector-client' frr/frr.conf.tmpl
694+
695+ if [ "$KIND_IPV6_SUPPORT" == true ]; then
696+ # Check if IPv6 address-family section exists
697+ if ! grep -q 'address-family ipv6 unicast' frr/frr.conf.tmpl; then
698+ # Add IPv6 address-family section if it doesn't exist
699+ sed -i '/exit-address-family/a \ \
700+ address-family ipv6 unicast\
701+ network '"${BGP_SERVER_NET_SUBNET_IPV6}"'\
702+ exit-address-family' frr/frr.conf.tmpl
703+ else
704+ # Add network to existing IPv6 section
705+ sed -i '/address-family ipv6 unicast/a \ \ network '"${BGP_SERVER_NET_SUBNET_IPV6}"'' frr/frr.conf.tmpl
706+ fi
707+
708+ # Add route-reflector-client for IPv6 neighbors
709+ sed -i '/neighbor fc00.*remote-as 64512/a \ neighbor {{ . }} route-reflector-client' frr/frr.conf.tmpl
710+ fi
676711 ./demo.sh
677712 popd || exit 1
713+ if [ "$KIND_IPV6_SUPPORT" == true ]; then
714+ # Enable IPv6 forwarding in FRR
715+ docker exec frr sysctl -w net.ipv6.conf.all.forwarding=1
716+ fi
717+ }
678718
679- # this container will act as the gateway for the cluster and will masquerade
680- # towards the external world
681- $OCI_BIN exec frr iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE
682- # set default route
683- FRR_IP=$($OCI_BIN inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}" frr)
684- KIND_NODES=$(kind_get_nodes)
685- for n in $KIND_NODES; do
686- $OCI_BIN exec "$n" ip route replace default via "$FRR_IP"
687- done
719+ deploy_bgp_external_server() {
720+ # We create an external docker container that acts as the server (or client) outside the cluster
721+ # in the e2e tests that leverage router advertisements.
722+ # This container will be connected to the frr container deployed above to simulate a realistic
723+ # network topology
724+ # ----------------- ------------------ ---------------------
725+ # | | 172.26.0.0/16 | | 172.18.0.0/16 | ovn-control-plane |
726+ # | external |<------------- | FRR router |<------ KIND cluster -- ---------------------
727+ # | server | | | | ovn-worker | (client pod advertised
728+ # ----------------- ------------------ --------------------- using RouteAdvertisements
729+ # | ovn-worker2 | from default pod network)
730+ # ---------------------
731+ local ip_family ipv6_network
732+ if [ "$KIND_IPV4_SUPPORT" == true ] && [ "$KIND_IPV6_SUPPORT" == true ]; then
733+ ip_family="dual"
734+ ipv6_network="--ipv6 --subnet=${BGP_SERVER_NET_SUBNET_IPV6}"
735+ elif [ "$KIND_IPV6_SUPPORT" == true ]; then
736+ ip_family="ipv6"
737+ ipv6_network="--ipv6 --subnet=${BGP_SERVER_NET_SUBNET_IPV6}"
738+ else
739+ ip_family="ipv4"
740+ ipv6_network=""
741+ fi
742+ docker network create --subnet="${BGP_SERVER_NET_SUBNET_IPV4}" ${ipv6_network} --driver bridge bgpnet
743+ docker network connect bgpnet frr
744+ docker run --cap-add NET_ADMIN --user 0 -d --network bgpnet --rm --name bgpserver -p 8080:8080 registry.k8s.io/e2e-test-images/agnhost:2.45 netexec
745+ # let's make the bgp external server have its default route towards the FRR router so that we don't need to add routes during tests back to the pods in the
746+ # cluster for return traffic
747+ local bgp_network_frr_v4 bgp_network_frr_v6
748+ bgp_network_frr_v4=$($OCI_BIN inspect -f '{{index .NetworkSettings.Networks "bgpnet" "IPAddress"}}' frr)
749+ echo "FRR kind network IPv4: ${bgp_network_frr_v4}"
750+ $OCI_BIN exec bgpserver ip route replace default via "$bgp_network_frr_v4"
751+ if [ "$KIND_IPV6_SUPPORT" == true ] ; then
752+ bgp_network_frr_v6=$($OCI_BIN inspect -f '{{index .NetworkSettings.Networks "bgpnet" "GlobalIPv6Address"}}' frr)
753+ echo "FRR kind network IPv6: ${bgp_network_frr_v6}"
754+ $OCI_BIN exec bgpserver ip -6 route replace default via "$bgp_network_frr_v6"
755+ fi
756+ }
757+
758+ destroy_bgp() {
759+ if docker ps --format '{{.Names}}' | grep -Eq '^bgpserver$'; then
760+ docker stop bgpserver
761+ fi
762+ if docker ps --format '{{.Names}}' | grep -Eq '^frr$'; then
763+ docker stop frr
764+ fi
765+ if docker network ls --format '{{.Name}}' | grep -q '^bgpnet$'; then
766+ docker network rm bgpnet
767+ fi
688768}
689769
690770install_ffr_k8s() {
@@ -699,9 +779,57 @@ install_ffr_k8s() {
699779 # apply a BGP peer configration with the external gateway that does not
700780 # exchange routes
701781 pushd "${FRR_TMP_DIR}"/frr-k8s/hack/demo/configs || exit 1
702- sed 's/all$/filtered/g' receive_all.yaml > receive_filtered.yaml
703- kubectl apply -f receive_filtered.yaml
782+ sed 's/mode: all/mode: filtered/g' receive_all.yaml > receive_filtered.yaml
783+ # Allow receiving the bgp external server's prefix
784+ sed -i '/mode: filtered/a\ prefixes:\n - prefix: '"${BGP_SERVER_NET_SUBNET_IPV4}"'' receive_filtered.yaml
785+ # If IPv6 is enabled, add the IPv6 prefix as well
786+ if [ "$KIND_IPV6_SUPPORT" == true ]; then
787+ # Find all line numbers where the IPv4 prefix is defined
788+ IPv6_LINE=" - prefix: ${BGP_SERVER_NET_SUBNET_IPV6}"
789+ # Process each occurrence of the IPv4 prefix
790+ for LINE_NUM in $(grep -n "prefix: ${BGP_SERVER_NET_SUBNET_IPV4}" receive_filtered.yaml | cut -d ':' -f 1); do
791+ # Insert the IPv6 prefix after each IPv4 prefix line
792+ sed -i "${LINE_NUM}a\\${IPv6_LINE}" receive_filtered.yaml
793+ done
794+ fi
795+ kubectl apply -n frr-k8s-system -f receive_filtered.yaml
704796 popd || exit 1
705797
706798 rm -rf "${FRR_TMP_DIR}"
799+ # Add routes for pod networks dynamically into the github runner for return traffic to pass back
800+ if [ -n "${JOB_NAME:-}" ] && [[ "$JOB_NAME" == *"shard-conformance"* ]] && [ "$ADVERTISE_DEFAULT_NETWORK" == "true" ]; then
801+ echo "Adding routes for Kubernetes pod networks..."
802+ NODES=$(kubectl get nodes -o jsonpath='{.items[*].metadata.name}')
803+ echo "Found nodes: $NODES"
804+ for node in $NODES; do
805+ # Get the addresses
806+ node_ips=$(kubectl get node $node -o jsonpath='{.status.addresses[?(@.type=="InternalIP")].address}')
807+ # Get subnet information
808+ subnet_json=$(kubectl get node $node -o jsonpath='{.metadata.annotations.k8s\.ovn\.org/node-subnets}')
809+
810+ if [ "$KIND_IPV4_SUPPORT" == true ]; then
811+ # Extract IPv4 address (first address)
812+ node_ipv4=$(echo "$node_ips" | awk '{print $1}')
813+ ipv4_subnet=$(echo "$subnet_json" | jq -r '.default[0]')
814+
815+ # Add IPv4 route
816+ if [ -n "$ipv4_subnet" ] && [ -n "$node_ipv4" ]; then
817+ echo "Adding IPv4 route for $node ($node_ipv4): $ipv4_subnet"
818+ sudo ip route add $ipv4_subnet via $node_ipv4
819+ fi
820+ fi
821+
822+ # Add IPv6 route if enabled
823+ if [ "$KIND_IPV6_SUPPORT" == true ]; then
824+ # Extract IPv6 address (second address, if present)
825+ node_ipv6=$(echo "$node_ips" | awk '{print $2}')
826+ ipv6_subnet=$(echo "$subnet_json" | jq -r '.default[1] // empty')
827+
828+ if [ -n "$ipv6_subnet" ] && [ -n "$node_ipv6" ]; then
829+ echo "Adding IPv6 route for $node ($node_ipv6): $ipv6_subnet"
830+ sudo ip -6 route add $ipv6_subnet via $node_ipv6
831+ fi
832+ fi
833+ done
834+ fi
707835}
0 commit comments