Skip to content

Commit fcf5be2

Browse files
authored
Merge pull request #5341 from ormergi/bgp-podman-support
Enable development around BGP using podman
2 parents e50cecc + 44b7719 commit fcf5be2

File tree

4 files changed

+58
-44
lines changed

4 files changed

+58
-44
lines changed

contrib/kind-common

Lines changed: 40 additions & 40 deletions
Original file line numberDiff line numberDiff line change
@@ -174,16 +174,16 @@ EOF
174174
# Override GOBIN until https://github.com/metallb/metallb/issues/2218 is fixed.
175175
GOBIN="" inv dev-env -n ovn -b frr -p bgp -i "${ip_family}"
176176

177-
docker network rm -f clientnet
178-
docker network create --subnet="${METALLB_CLIENT_NET_SUBNET_IPV4}" ${ipv6_network} --driver bridge clientnet
179-
docker network connect clientnet frr
177+
$OCI_BIN network rm -f clientnet
178+
$OCI_BIN network create --subnet="${METALLB_CLIENT_NET_SUBNET_IPV4}" ${ipv6_network} --driver bridge clientnet
179+
$OCI_BIN network connect clientnet frr
180180
if [ "$PLATFORM_IPV6_SUPPORT" == true ]; then
181181
# Enable IPv6 forwarding in FRR
182-
docker exec frr sysctl -w net.ipv6.conf.all.forwarding=1
182+
$OCI_BIN exec frr sysctl -w net.ipv6.conf.all.forwarding=1
183183
fi
184184
# Note: this image let's us use it also for creating load balancer backends that can send big packets
185-
docker rm -f lbclient
186-
docker run --cap-add NET_ADMIN --user 0 -d --network clientnet --rm --name lbclient quay.io/itssurya/dev-images:metallb-lbservice
185+
$OCI_BIN rm -f lbclient
186+
$OCI_BIN run --cap-add NET_ADMIN --user 0 -d --network clientnet --rm --name lbclient quay.io/itssurya/dev-images:metallb-lbservice
187187
popd
188188
delete_metallb_dir
189189

@@ -197,18 +197,18 @@ EOF
197197
kubectl label node "$n" node.kubernetes.io/exclude-from-external-load-balancers-
198198
done
199199

200-
kind_network_v4=$(docker inspect -f '{{index .NetworkSettings.Networks "kind" "IPAddress"}}' frr)
200+
kind_network_v4=$($OCI_BIN inspect -f '{{.NetworkSettings.Networks.kind.IPAddress}}' frr)
201201
echo "FRR kind network IPv4: ${kind_network_v4}"
202-
kind_network_v6=$(docker inspect -f '{{index .NetworkSettings.Networks "kind" "GlobalIPv6Address"}}' frr)
202+
kind_network_v6=$($OCI_BIN inspect -f '{{.NetworkSettings.Networks.kind.GlobalIPv6Address}}' frr)
203203
echo "FRR kind network IPv6: ${kind_network_v6}"
204204
local client_network_v4 client_network_v6
205-
client_network_v4=$(docker inspect -f '{{index .NetworkSettings.Networks "clientnet" "IPAddress"}}' frr)
205+
client_network_v4=$($OCI_BIN inspect -f '{{.NetworkSettings.Networks.clientnet.IPAddress}}' frr)
206206
echo "FRR client network IPv4: ${client_network_v4}"
207-
client_network_v6=$(docker inspect -f '{{index .NetworkSettings.Networks "clientnet" "GlobalIPv6Address"}}' frr)
207+
client_network_v6=$($OCI_BIN inspect -f '{{.NetworkSettings.Networks.clientnet.GlobalIPv6Address}}' frr)
208208
echo "FRR client network IPv6: ${client_network_v6}"
209209

210210
local client_subnets
211-
client_subnets=$(docker network inspect clientnet -f '{{range .IPAM.Config}}{{.Subnet}}#{{end}}')
211+
client_subnets=$($OCI_BIN network inspect clientnet -f '{{range .IPAM.Config}}{{.Subnet}}#{{end}}')
212212
echo "${client_subnets}"
213213
local client_subnets_v4 client_subnets_v6
214214
client_subnets_v4=$(echo "${client_subnets}" | cut -d '#' -f 1)
@@ -219,21 +219,21 @@ EOF
219219
KIND_NODES=$(kind_get_nodes)
220220
for n in ${KIND_NODES}; do
221221
if [ "$PLATFORM_IPV4_SUPPORT" == true ]; then
222-
docker exec "${n}" ip route add "${client_subnets_v4}" via "${kind_network_v4}"
222+
$OCI_BIN exec "${n}" ip route add "${client_subnets_v4}" via "${kind_network_v4}"
223223
fi
224224
if [ "$PLATFORM_IPV6_SUPPORT" == true ]; then
225-
docker exec "${n}" ip -6 route add "${client_subnets_v6}" via "${kind_network_v6}"
225+
$OCI_BIN exec "${n}" ip -6 route add "${client_subnets_v6}" via "${kind_network_v6}"
226226
fi
227227
done
228228

229229
# for now, we only run one test with metalLB load balancer for which this
230230
# one svcVIP (192.168.10.0/fc00:f853:ccd:e799::) is more than enough since at a time we will only
231231
# have one load balancer service
232232
if [ "$PLATFORM_IPV4_SUPPORT" == true ]; then
233-
docker exec lbclient ip route add 192.168.10.0 via "${client_network_v4}" dev eth0
233+
$OCI_BIN exec lbclient ip route add 192.168.10.0 via "${client_network_v4}" dev eth0
234234
fi
235235
if [ "$PLATFORM_IPV6_SUPPORT" == true ]; then
236-
docker exec lbclient ip -6 route add fc00:f853:ccd:e799:: via "${client_network_v6}" dev eth0
236+
$OCI_BIN exec lbclient ip -6 route add fc00:f853:ccd:e799:: via "${client_network_v6}" dev eth0
237237
fi
238238
sleep 30
239239
}
@@ -254,14 +254,14 @@ install_plugins() {
254254
}
255255

256256
destroy_metallb() {
257-
if docker ps --format '{{.Names}}' | grep -Eq '^lbclient$'; then
258-
docker stop lbclient
257+
if $OCI_BIN ps --format '{{.Names}}' | grep -Eq '^lbclient$'; then
258+
$OCI_BIN stop lbclient
259259
fi
260-
if docker ps --format '{{.Names}}' | grep -Eq '^frr$'; then
261-
docker stop frr
260+
if $OCI_BIN ps --format '{{.Names}}' | grep -Eq '^frr$'; then
261+
$OCI_BIN stop frr
262262
fi
263-
if docker network ls --format '{{.Name}}' | grep -q '^clientnet$'; then
264-
docker network rm clientnet
263+
if $OCI_BIN network ls --format '{{.Name}}' | grep -q '^clientnet$'; then
264+
$OCI_BIN network rm clientnet
265265
fi
266266
delete_metallb_dir
267267
}
@@ -708,7 +708,7 @@ deploy_frr_external_container() {
708708
popd || exit 1
709709
if [ "$PLATFORM_IPV6_SUPPORT" == true ]; then
710710
# Enable IPv6 forwarding in FRR
711-
docker exec frr sysctl -w net.ipv6.conf.all.forwarding=1
711+
$OCI_BIN exec frr sysctl -w net.ipv6.conf.all.forwarding=1
712712
fi
713713
}
714714

@@ -735,40 +735,40 @@ deploy_bgp_external_server() {
735735
ip_family="ipv4"
736736
ipv6_network=""
737737
fi
738-
docker rm -f bgpserver
739-
docker network rm -f bgpnet
740-
docker network create --subnet="${BGP_SERVER_NET_SUBNET_IPV4}" ${ipv6_network} --driver bridge bgpnet
741-
docker network connect bgpnet frr
742-
docker run --cap-add NET_ADMIN --user 0 -d --network bgpnet --rm --name bgpserver -p 8080:8080 registry.k8s.io/e2e-test-images/agnhost:2.45 netexec
738+
$OCI_BIN rm -f bgpserver
739+
$OCI_BIN network rm -f bgpnet
740+
$OCI_BIN network create --subnet="${BGP_SERVER_NET_SUBNET_IPV4}" ${ipv6_network} --driver bridge bgpnet
741+
$OCI_BIN network connect bgpnet frr
742+
$OCI_BIN run --cap-add NET_ADMIN --user 0 -d --network bgpnet --rm --name bgpserver -p 8080:8080 registry.k8s.io/e2e-test-images/agnhost:2.45 netexec
743743
# let's make the bgp external server have its default route towards FRR router so that we don't need to add routes during tests back to the pods in the
744744
# cluster for return traffic
745745
local bgp_network_frr_v4 bgp_network_frr_v6
746-
bgp_network_frr_v4=$($OCI_BIN inspect -f '{{index .NetworkSettings.Networks "bgpnet" "IPAddress"}}' frr)
746+
bgp_network_frr_v4=$($OCI_BIN inspect -f '{{.NetworkSettings.Networks.bgpnet.IPAddress}}' frr)
747747
echo "FRR kind network IPv4: ${bgp_network_frr_v4}"
748748
$OCI_BIN exec bgpserver ip route replace default via "$bgp_network_frr_v4"
749749
if [ "$PLATFORM_IPV6_SUPPORT" == true ] ; then
750-
bgp_network_frr_v6=$($OCI_BIN inspect -f '{{index .NetworkSettings.Networks "bgpnet" "GlobalIPv6Address"}}' frr)
750+
bgp_network_frr_v6=$($OCI_BIN inspect -f '{{.NetworkSettings.Networks.bgpnet.GlobalIPv6Address}}' frr)
751751
echo "FRR kind network IPv6: ${bgp_network_frr_v6}"
752752
$OCI_BIN exec bgpserver ip -6 route replace default via "$bgp_network_frr_v6"
753753
fi
754754
# disable the default route to make sure the container only routes accross
755755
# directly connected or learnt networks (doing this at the very end since
756756
# docker changes the routing table when a new network is connected)
757-
docker exec frr ip route delete default
758-
docker exec frr ip route
759-
docker exec frr ip -6 route delete default
760-
docker exec frr ip -6 route
757+
$OCI_BIN exec frr ip route delete default
758+
$OCI_BIN exec frr ip route
759+
$OCI_BIN exec frr ip -6 route delete default
760+
$OCI_BIN exec frr ip -6 route
761761
}
762762

763763
destroy_bgp() {
764-
if docker ps --format '{{.Names}}' | grep -Eq '^bgpserver$'; then
765-
docker stop bgpserver
764+
if $OCI_BIN ps --format '{{.Names}}' | grep -Eq '^bgpserver$'; then
765+
$OCI_BIN stop bgpserver
766766
fi
767-
if docker ps --format '{{.Names}}' | grep -Eq '^frr$'; then
768-
docker stop frr
767+
if $OCI_BIN ps --format '{{.Names}}' | grep -Eq '^frr$'; then
768+
$OCI_BIN stop frr
769769
fi
770-
if docker network ls --format '{{.Name}}' | grep -q '^bgpnet$'; then
771-
docker network rm bgpnet
770+
if $OCI_BIN network ls --format '{{.Name}}' | grep -q '^bgpnet$'; then
771+
$OCI_BIN network rm bgpnet
772772
fi
773773
}
774774

@@ -807,7 +807,7 @@ install_ffr_k8s() {
807807
echo "Attempting to reach frr-k8s webhook"
808808
kind export kubeconfig --name ovn
809809
while true; do
810-
docker exec ovn-control-plane curl -ksS --connect-timeout 0.1 https://$(kubectl get svc -n frr-k8s-system frr-k8s-webhook-service -o jsonpath='{.spec.clusterIP}')
810+
$OCI_BIN exec ovn-control-plane curl -ksS --connect-timeout 0.1 https://$(kubectl get svc -n frr-k8s-system frr-k8s-webhook-service -o jsonpath='{.spec.clusterIP}')
811811
[ \$? -eq 0 ] && exit 0
812812
echo "Couldn't reach frr-k8s webhook, trying in 1s..."
813813
sleep 1s

contrib/kind.sh

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -42,6 +42,8 @@ function setup_kubectl_bin() {
4242
# The root cause is unknown, this also can not be reproduced in Ubuntu 20.04 or
4343
# with Fedora32 Cloud, but it does not happen if we clean first the ovn-kubernetes resources.
4444
delete() {
45+
OCI_BIN=${KIND_EXPERIMENTAL_PROVIDER:-docker}
46+
4547
if [ "$KIND_INSTALL_METALLB" == true ]; then
4648
destroy_metallb
4749
fi

test/e2e/containerengine/container_engine.go

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -12,6 +12,16 @@ func (ce ContainerEngine) String() string {
1212
return string(ce)
1313
}
1414

15+
func (ce ContainerEngine) NetworkCIDRsFmt() string {
16+
if ce == Podman {
17+
return "{{json .Subnets }}"
18+
}
19+
if ce == Docker {
20+
return "{{json .IPAM.Config }}"
21+
}
22+
return ""
23+
}
24+
1525
const (
1626
Docker ContainerEngine = "docker"
1727
Podman ContainerEngine = "podman"

test/e2e/infraprovider/providers/kind/kind.go

Lines changed: 6 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -414,7 +414,6 @@ func (c *contextKind) cleanUp() error {
414414

415415
const (
416416
nameFormat = "{{.Name}}"
417-
inspectNetworkIPAMJSON = "{{json .IPAM.Config }}"
418417
inspectNetworkIPv4GWKeyStr = "{{ .NetworkSettings.Networks.%s.Gateway }}"
419418
inspectNetworkIPv4AddrKeyStr = "{{ .NetworkSettings.Networks.%s.IPAddress }}"
420419
inspectNetworkIPv4PrefixKeyStr = "{{ .NetworkSettings.Networks.%s.IPPrefixLen }}"
@@ -437,7 +436,7 @@ func isNetworkAttachedToContainer(networkName, containerName string) bool {
437436

438437
func doesContainerNameExist(name string) bool {
439438
// check if it is present before retrieving logs
440-
stdOut, err := exec.Command(containerengine.Get().String(), "ps", "-f", fmt.Sprintf("Name=^%s$", name), "-q").CombinedOutput()
439+
stdOut, err := exec.Command(containerengine.Get().String(), "ps", "-f", fmt.Sprintf("name=^%s$", name), "-q").CombinedOutput()
441440
if err != nil {
442441
panic(fmt.Sprintf("failed to check if external container (%s) exists: %v (%s)", name, err, stdOut))
443442
}
@@ -466,13 +465,16 @@ func getNetwork(networkName string) (containerEngineNetwork, error) {
466465
return n, api.NotFound
467466
}
468467
configs := make([]containerEngineNetworkConfig, 0, 1)
469-
dataBytes, err := exec.Command(containerengine.Get().String(), "network", "inspect", "-f", inspectNetworkIPAMJSON, networkName).CombinedOutput()
468+
469+
ce := containerengine.Get()
470+
netConfFmt := ce.NetworkCIDRsFmt()
471+
dataBytes, err := exec.Command(ce.String(), "network", "inspect", "-f", netConfFmt, networkName).CombinedOutput()
470472
if err != nil {
471473
return n, fmt.Errorf("failed to extract network %q data: %v", networkName, err)
472474
}
473475
dataBytes = []byte(strings.Trim(string(dataBytes), "\n"))
474476
if err = json.Unmarshal(dataBytes, &configs); err != nil {
475-
return n, fmt.Errorf("failed to unmarshall network %q configuration using network inspect -f %q: %v", networkName, inspectNetworkIPAMJSON, err)
477+
return n, fmt.Errorf("failed to unmarshall network %q configuration using network inspect -f %q: %v", networkName, netConfFmt, err)
476478
}
477479
if len(configs) == 0 {
478480
return n, fmt.Errorf("failed to find any IPAM configuration for network %s", networkName)

0 commit comments

Comments
 (0)