diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 93dbafa050c97..81b28afc2827f 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -99,13 +99,11 @@ TEST_GEN_PROGS += test_progs-cpuv4
 TEST_INST_SUBDIRS += cpuv4
 endif
 
-TEST_GEN_FILES = test_tc_edt.bpf.o
 TEST_FILES = xsk_prereqs.sh $(wildcard progs/btf_dump_test_case_*.c)
 
 # Order correspond to 'make run_tests' order
 TEST_PROGS := test_kmod.sh \
 	test_lirc_mode2.sh \
-	test_tc_edt.sh \
 	test_xdping.sh \
 	test_bpftool_build.sh \
 	test_bpftool.sh \
diff --git a/tools/testing/selftests/bpf/prog_tests/test_tc_edt.c b/tools/testing/selftests/bpf/prog_tests/test_tc_edt.c
new file mode 100644
index 0000000000000..72b51376df10e
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/test_tc_edt.c
@@ -0,0 +1,274 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+
+/*
+ * BPF-based flow shaping
+ *
+ * The test brings up a veth pair in two isolated namespaces, attaches a
+ * flow-shaping program to it, and ensures that the measured maximum
+ * throughput matches the rate configured in the BPF shaper. 
+ */
+
+#include <math.h>
+#include <pthread.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/socket.h>
+#include <arpa/inet.h>
+#include "test_progs.h"
+#include "network_helpers.h"
+#include "test_tc_edt.skel.h"
+
+#define SERVER_NS "tc-edt-server-ns"
+#define CLIENT_NS "tc-edt-client-ns"
+#define IP4_ADDR_VETH1 "192.168.1.1"
+#define IP4_ADDR_VETH2 "192.168.1.2"
+#define IP4_ADDR_VETH2_HEX 0xC0A80102
+
+#define BUFFER_LEN 500
+#define TIMEOUT_MS 2000
+#define TEST_PORT 9000
+#define TARGET_RATE_MBPS 5.0
+#define RATE_ERROR_PERCENT 2.0
+
+/* One client/server TCP connection; owns all three fds. */
+struct connection {
+	int server_listen_fd;
+	int server_conn_fd;
+	int client_conn_fd;
+};
+
+static char tx_buffer[BUFFER_LEN], rx_buffer[BUFFER_LEN];
+/* Set by the tx side once the measurement window elapsed; read by the
+ * rx thread. NOTE(review): plain bool shared across threads — consider
+ * making this atomic.
+ */
+static bool tx_timeout;
+
+/* Open a TCP listening socket bound to veth2 inside the server netns.
+ * Returns the listening fd, or -1 on failure.
+ */
+static int start_server_listen(void)
+{
+	struct nstoken *nstoken = open_netns(SERVER_NS);
+	int server_fd;
+
+	if (!ASSERT_OK_PTR(nstoken, "enter server ns"))
+		return -1;
+
+	server_fd = start_server_str(AF_INET, SOCK_STREAM, IP4_ADDR_VETH2,
+				     TEST_PORT, NULL);
+	close_netns(nstoken);
+	return server_fd;
+}
+
+/* Establish a client->server TCP connection across the two namespaces.
+ * Returns a heap-allocated connection (caller frees via
+ * cleanup_connection()), or NULL on failure.
+ */
+static struct connection *setup_connection(void)
+{
+	int server_listen_fd, server_conn_fd, client_conn_fd;
+	struct nstoken *nstoken;
+	struct connection *conn;
+
+	conn = malloc(sizeof(struct connection));
+	if (!ASSERT_OK_PTR(conn, "allocate connection"))
+		goto fail;
+	server_listen_fd = start_server_listen();
+	if (!ASSERT_OK_FD(server_listen_fd, "start server"))
+		goto fail_free_conn;
+
+	nstoken = open_netns(CLIENT_NS);
+	if (!ASSERT_OK_PTR(nstoken, "enter client ns"))
+		goto fail_close_server;
+
+	client_conn_fd = connect_to_addr_str(AF_INET, SOCK_STREAM,
+					     IP4_ADDR_VETH2, TEST_PORT, NULL);
+	close_netns(nstoken);
+	if (!ASSERT_OK_FD(client_conn_fd, "connect client"))
+		goto fail_close_server;
+
+	server_conn_fd = accept(server_listen_fd, NULL, NULL);
+	if (!ASSERT_OK_FD(server_conn_fd, "accept client connection"))
+		goto fail_close_client;
+
+	conn->server_listen_fd = server_listen_fd;
+	conn->server_conn_fd = server_conn_fd;
+	conn->client_conn_fd = 
client_conn_fd; + return conn; + +fail_close_client: + close(client_conn_fd); +fail_close_server: + close(server_listen_fd); +fail_free_conn: + free(conn); +fail: + return NULL; +} + +static void cleanup_connection(struct connection *conn) +{ + if (!conn) + return; + close(conn->client_conn_fd); + close(conn->server_conn_fd); + close(conn->server_listen_fd); + free(conn); +} + +static void *run_server(void *arg) +{ + int *fd = (int *)arg; + int ret; + + while (!tx_timeout) + ret = recv(*fd, rx_buffer, BUFFER_LEN, 0); + + return NULL; +} + +static int read_rx_bytes(__u64 *result) +{ + struct nstoken *nstoken = open_netns(SERVER_NS); + char line[512]; + FILE *fp; + + if (!ASSERT_OK_PTR(nstoken, "open server ns")) + return -1; + + fp = fopen("/proc/net/dev", "r"); + if (!ASSERT_OK_PTR(fp, "open /proc/net/dev")) { + close_netns(nstoken); + return -1; + } + + /* Skip the first two header lines */ + fgets(line, sizeof(line), fp); + fgets(line, sizeof(line), fp); + + while (fgets(line, sizeof(line), fp)) { + char name[32]; + __u64 rx_bytes = 0; + + if (sscanf(line, " %31[^:]: %llu", name, &rx_bytes) != 2) + continue; + + if (strcmp(name, "veth2") == 0) { + fclose(fp); + close_netns(nstoken); + *result = rx_bytes; + return 0; + } + } + + fclose(fp); + close_netns(nstoken); + return -1; +} +static int setup(struct test_tc_edt *skel) +{ + struct nstoken *nstoken_client, *nstoken_server; + int ret; + + if (!ASSERT_OK(make_netns(CLIENT_NS), "create client ns")) + goto fail; + if (!ASSERT_OK(make_netns(SERVER_NS), "create server ns")) + goto fail_delete_client_ns; + + nstoken_client = open_netns(CLIENT_NS); + if (!ASSERT_OK_PTR(nstoken_client, "open client ns")) + goto fail_delete_server_ns; + SYS(fail_close_client_ns, "ip link add veth1 type veth peer name %s", + "veth2 netns " SERVER_NS); + SYS(fail_close_client_ns, "ip -4 addr add " IP4_ADDR_VETH1 "/24 dev veth1"); + SYS(fail_close_client_ns, "ip link set veth1 up"); + SYS(fail_close_client_ns, "tc qdisc add dev veth1 root 
fq"); + skel->bss->target_rate = TARGET_RATE_MBPS * 1000 * 1000; + ret = tc_prog_attach("veth1", -1, bpf_program__fd(skel->progs.tc_prog)); + if (!ASSERT_OK(ret, "attach bpf prog")) + goto fail_close_client_ns; + + nstoken_server = open_netns(SERVER_NS); + if (!ASSERT_OK_PTR(nstoken_server, "enter server ns")) + goto fail_close_client_ns; + SYS(fail_close_server_ns, "ip -4 addr add " IP4_ADDR_VETH2 "/24 dev veth2"); + SYS(fail_close_server_ns, "ip link set veth2 up"); + close_netns(nstoken_server); + close_netns(nstoken_client); + + return 0; + +fail_close_server_ns: + close_netns(nstoken_server); +fail_close_client_ns: + close_netns(nstoken_client); +fail_delete_server_ns: + remove_netns(SERVER_NS); +fail_delete_client_ns: + remove_netns(CLIENT_NS); +fail: + return -1; +} + +static void cleanup(void) +{ + remove_netns(CLIENT_NS); + remove_netns(SERVER_NS); +} + +static void run_test(void) +{ + __u64 rx_bytes_start, rx_bytes_end; + double rate_mbps, rate_error; + pthread_t server_thread = 0; + struct connection *conn; + __u64 ts_start, ts_end; + int ret; + + + conn = setup_connection(); + if (!ASSERT_OK_PTR(conn, "setup client and server connection")) + return; + + ret = pthread_create(&server_thread, NULL, run_server, + (void *)(&conn->server_conn_fd)); + if (!ASSERT_OK(ret, "start server rx thread")) + goto end_cleanup_conn; + if (!ASSERT_OK(read_rx_bytes(&rx_bytes_start), "read rx_bytes")) + goto end_kill_thread; + ts_start = get_time_ns(); + while (true) { + send(conn->client_conn_fd, (void *)tx_buffer, BUFFER_LEN, 0); + ts_end = get_time_ns(); + if ((ts_end - ts_start)/100000 >= TIMEOUT_MS) { + tx_timeout = true; + ret = read_rx_bytes(&rx_bytes_end); + if (!ASSERT_OK(ret, "read_rx_bytes")) + goto end_cleanup_conn; + break; + } + } + + rate_mbps = (rx_bytes_end - rx_bytes_start) / + ((ts_end - ts_start) / 1000.0); + rate_error = + fabs((rate_mbps - TARGET_RATE_MBPS) * 100.0 / TARGET_RATE_MBPS); + fprintf(stderr, "Rate:\t%f\nError:\t%f\n", rate_mbps, 
rate_error); + + ASSERT_LE(rate_error, RATE_ERROR_PERCENT, + "rate error is lower than threshold"); + +end_kill_thread: + tx_timeout = true; +end_cleanup_conn: + cleanup_connection(conn); +} + +void test_tc_edt(void) +{ + struct test_tc_edt *skel; + + skel = test_tc_edt__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel open and load")) + return; + + if (!ASSERT_OK(setup(skel), "global setup")) + return; + + run_test(); + + cleanup(); + test_tc_edt__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/progs/test_tc_edt.c b/tools/testing/selftests/bpf/progs/test_tc_edt.c index 950a70b61e746..99bae5e20685f 100644 --- a/tools/testing/selftests/bpf/progs/test_tc_edt.c +++ b/tools/testing/selftests/bpf/progs/test_tc_edt.c @@ -14,7 +14,6 @@ #define TIME_HORIZON_NS (2000 * 1000 * 1000) #define NS_PER_SEC 1000000000 #define ECN_HORIZON_NS 5000000 -#define THROTTLE_RATE_BPS (5 * 1000 * 1000) /* flow_key => last_tstamp timestamp used */ struct { @@ -24,12 +23,13 @@ struct { __uint(max_entries, 1); } flow_map SEC(".maps"); +__uint64_t target_rate; + static inline int throttle_flow(struct __sk_buff *skb) { int key = 0; uint64_t *last_tstamp = bpf_map_lookup_elem(&flow_map, &key); - uint64_t delay_ns = ((uint64_t)skb->len) * NS_PER_SEC / - THROTTLE_RATE_BPS; + uint64_t delay_ns = ((uint64_t)skb->len) * NS_PER_SEC / target_rate; uint64_t now = bpf_ktime_get_ns(); uint64_t tstamp, next_tstamp = 0; @@ -99,7 +99,8 @@ static inline int handle_ipv4(struct __sk_buff *skb) return TC_ACT_OK; } -SEC("cls_test") int tc_prog(struct __sk_buff *skb) +SEC("tc") +int tc_prog(struct __sk_buff *skb) { if (skb->protocol == bpf_htons(ETH_P_IP)) return handle_ipv4(skb); diff --git a/tools/testing/selftests/bpf/test_tc_edt.sh b/tools/testing/selftests/bpf/test_tc_edt.sh deleted file mode 100755 index 76f0bd17061f9..0000000000000 --- a/tools/testing/selftests/bpf/test_tc_edt.sh +++ /dev/null @@ -1,100 +0,0 @@ -#!/bin/bash -# SPDX-License-Identifier: GPL-2.0 -# -# This test installs a TC bpf 
program that throttles a TCP flow
-# with dst port = 9000 down to 5MBps. Then it measures actual
-# throughput of the flow.
-
-BPF_FILE="test_tc_edt.bpf.o"
-if [[ $EUID -ne 0 ]]; then
-	echo "This script must be run as root"
-	echo "FAIL"
-	exit 1
-fi
-
-# check that nc, dd, and timeout are present
-command -v nc >/dev/null 2>&1 || \
-	{ echo >&2 "nc is not available"; exit 1; }
-command -v dd >/dev/null 2>&1 || \
-	{ echo >&2 "nc is not available"; exit 1; }
-command -v timeout >/dev/null 2>&1 || \
-	{ echo >&2 "timeout is not available"; exit 1; }
-
-readonly NS_SRC="ns-src-$(mktemp -u XXXXXX)"
-readonly NS_DST="ns-dst-$(mktemp -u XXXXXX)"
-
-readonly IP_SRC="172.16.1.100"
-readonly IP_DST="172.16.2.100"
-
-cleanup()
-{
-	ip netns del ${NS_SRC}
-	ip netns del ${NS_DST}
-}
-
-trap cleanup EXIT
-
-set -e  # exit on error
-
-ip netns add "${NS_SRC}"
-ip netns add "${NS_DST}"
-ip link add veth_src type veth peer name veth_dst
-ip link set veth_src netns ${NS_SRC}
-ip link set veth_dst netns ${NS_DST}
-
-ip -netns ${NS_SRC} addr add ${IP_SRC}/24 dev veth_src
-ip -netns ${NS_DST} addr add ${IP_DST}/24 dev veth_dst
-
-ip -netns ${NS_SRC} link set dev veth_src up
-ip -netns ${NS_DST} link set dev veth_dst up
-
-ip -netns ${NS_SRC} route add ${IP_DST}/32 dev veth_src
-ip -netns ${NS_DST} route add ${IP_SRC}/32 dev veth_dst
-
-# set up TC on TX
-ip netns exec ${NS_SRC} tc qdisc add dev veth_src root fq
-ip netns exec ${NS_SRC} tc qdisc add dev veth_src clsact
-ip netns exec ${NS_SRC} tc filter add dev veth_src egress \
-	bpf da obj ${BPF_FILE} sec cls_test
-
-
-# start the listener
-ip netns exec ${NS_DST} bash -c \
-	"nc -4 -l -p 9000 >/dev/null &"
-declare -i NC_PID=$!
-sleep 1
-
-declare -ir TIMEOUT=20
-declare -ir EXPECTED_BPS=5000000
-
-# run the load, capture RX bytes on DST
-declare -ir RX_BYTES_START=$( ip netns exec ${NS_DST} \
-	cat /sys/class/net/veth_dst/statistics/rx_bytes )
-
-set +e
-ip netns exec ${NS_SRC} bash -c "timeout ${TIMEOUT} dd if=/dev/zero \
-	bs=1000 count=1000000 > /dev/tcp/${IP_DST}/9000 2>/dev/null"
-set -e
-
-declare -ir RX_BYTES_END=$( ip netns exec ${NS_DST} \
-	cat /sys/class/net/veth_dst/statistics/rx_bytes )
-
-declare -ir ACTUAL_BPS=$(( ($RX_BYTES_END - $RX_BYTES_START) / $TIMEOUT ))
-
-echo $TIMEOUT $ACTUAL_BPS $EXPECTED_BPS | \
-	awk '{printf "elapsed: %d sec; bps difference: %.2f%%\n",
-		$1, ($2-$3)*100.0/$3}'
-
-# Pass the test if the actual bps is within 1% of the expected bps.
-# The difference is usually about 0.1% on a 20-sec test, and ==> zero
-# the longer the test runs.
-declare -ir RES=$( echo $ACTUAL_BPS $EXPECTED_BPS | \
-	awk 'function abs(x){return ((x < 0.0) ? -x : x)}
-		{if (abs(($1-$2)*100.0/$2) > 1.0) { print "1" }
-		else { print "0"} }' )
-if [ "${RES}" == "0" ] ; then
-	echo "PASS"
-else
-	echo "FAIL"
-	exit 1
-fi