
Commit fdc3489

Geliang Tang authored and matttbe committed
selftests/bpf: Add bpf_rr scheduler & test
This patch implements the round-robin BPF MPTCP scheduler, named bpf_rr, which always picks the next available subflow to send data on. If no such next subflow is available, it picks the first one.

A new test is added for this bpf_rr scheduler: test_rr() loads the scheduler via its libbpf skeleton and runs test_bpf_sched() with WITH_DATA for both net devices, i.e. it checks that data has been sent on each of them. The test runs as the "rr" subtest of the mptcp BPF selftest.

Signed-off-by: Geliang Tang <[email protected]>
Reviewed-by: Mat Martineau <[email protected]>
Reviewed-by: Matthieu Baerts (NGI0) <[email protected]>
1 parent 87d2f3c commit fdc3489
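For illustration only, here is a minimal standalone C sketch of the round-robin pick described in the commit message: take the slot right after the one used for the previous send, and fall back to the first slot when there is no next one. The rr_pick() helper and the plain integer indices are hypothetical stand-ins, not part of this commit or of the kernel API.

#include <stdio.h>

/* Hypothetical round-robin pick over nr_subflows slots: return the slot
 * right after 'last', falling back to the first slot when no next one
 * exists.  Integer indices stand in for real MPTCP subflows.
 */
static int rr_pick(int last, int nr_subflows)
{
	if (nr_subflows <= 0)
		return -1;		/* nothing to schedule */
	if (last < 0 || last + 1 >= nr_subflows)
		return 0;		/* no next subflow: take the first */
	return last + 1;		/* otherwise: the next available one */
}

int main(void)
{
	int last = -1;

	/* With 3 subflows, successive sends rotate 0, 1, 2, 0, 1, 2. */
	for (int i = 0; i < 6; i++) {
		last = rr_pick(last, 3);
		printf("send %d -> subflow %d\n", i, last);
	}
	return 0;
}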

2 files changed: +93 / -0 lines

tools/testing/selftests/bpf/prog_tests/mptcp.c

Lines changed: 15 additions & 0 deletions
@@ -14,6 +14,7 @@
 #include "mptcp_bpf_iters.skel.h"
 #include "mptcp_bpf_first.skel.h"
 #include "mptcp_bpf_bkup.skel.h"
+#include "mptcp_bpf_rr.skel.h"
 
 #define NS_TEST "mptcp_ns"
 #define ADDR_1 "10.0.1.1"
@@ -696,6 +697,18 @@ static void test_bkup(void)
 	mptcp_bpf_bkup__destroy(skel);
 }
 
+static void test_rr(void)
+{
+	struct mptcp_bpf_rr *skel;
+
+	skel = mptcp_bpf_rr__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "open_and_load: rr"))
+		return;
+
+	test_bpf_sched(skel->obj, "rr", WITH_DATA, WITH_DATA);
+	mptcp_bpf_rr__destroy(skel);
+}
+
 void test_mptcp(void)
 {
 	if (test__start_subtest("base"))
@@ -712,4 +725,6 @@ void test_mptcp(void)
 		test_first();
 	if (test__start_subtest("bkup"))
 		test_bkup();
+	if (test__start_subtest("rr"))
+		test_rr();
 }
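test_rr() above relies on the libbpf skeleton generated from the new scheduler program (below) and on the shared test_bpf_sched() helper, whose body is not part of this diff. The following is only a rough sketch of how such a helper typically registers a struct_ops scheduler with libbpf; it assumes the generated skeleton exposes the struct_ops map as skel->maps.rr (the usual bpftool naming) and is not the actual selftest code.

#include <bpf/libbpf.h>
#include "mptcp_bpf_rr.skel.h"

/* Sketch only: load the skeleton, register the "bpf_rr" scheduler by
 * attaching its struct_ops map, and tear everything down afterwards.
 */
static int register_bpf_rr(void)
{
	struct mptcp_bpf_rr *skel;
	struct bpf_link *link;

	skel = mptcp_bpf_rr__open_and_load();
	if (!skel)
		return -1;

	/* Attaching the struct_ops map registers the scheduler. */
	link = bpf_map__attach_struct_ops(skel->maps.rr);
	if (!link) {
		mptcp_bpf_rr__destroy(skel);
		return -1;
	}

	/* ... run MPTCP traffic and check both subflows carried data ... */

	bpf_link__destroy(link);	/* unregisters the scheduler */
	mptcp_bpf_rr__destroy(skel);
	return 0;
}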
tools/testing/selftests/bpf/progs/mptcp_bpf_rr.c

Lines changed: 78 additions & 0 deletions
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022, SUSE. */
+
+#include "mptcp_bpf.h"
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+struct mptcp_rr_storage {
+	struct sock	*last_snd;
+};
+
+struct {
+	__uint(type, BPF_MAP_TYPE_SK_STORAGE);
+	__uint(map_flags, BPF_F_NO_PREALLOC);
+	__type(key, int);
+	__type(value, struct mptcp_rr_storage);
+} mptcp_rr_map SEC(".maps");
+
+SEC("struct_ops")
+void BPF_PROG(mptcp_sched_rr_init, struct mptcp_sock *msk)
+{
+	bpf_sk_storage_get(&mptcp_rr_map, msk, 0,
+			   BPF_LOCAL_STORAGE_GET_F_CREATE);
+}
+
+SEC("struct_ops")
+void BPF_PROG(mptcp_sched_rr_release, struct mptcp_sock *msk)
+{
+	bpf_sk_storage_delete(&mptcp_rr_map, msk);
+}
+
+SEC("struct_ops")
+int BPF_PROG(bpf_rr_get_send, struct mptcp_sock *msk,
+	     struct mptcp_sched_data *data)
+{
+	struct mptcp_subflow_context *subflow;
+	struct mptcp_rr_storage *ptr;
+	struct sock *last_snd = NULL;
+	int nr = 0;
+
+	ptr = bpf_sk_storage_get(&mptcp_rr_map, msk, 0,
+				 BPF_LOCAL_STORAGE_GET_F_CREATE);
+	if (!ptr)
+		return -1;
+
+	last_snd = ptr->last_snd;
+
+	for (int i = 0; i < data->subflows && i < MPTCP_SUBFLOWS_MAX; i++) {
+		subflow = bpf_mptcp_subflow_ctx_by_pos(data, i);
+		if (!last_snd || !subflow)
+			break;
+
+		if (mptcp_subflow_tcp_sock(subflow) == last_snd) {
+			if (i + 1 == MPTCP_SUBFLOWS_MAX ||
+			    !bpf_mptcp_subflow_ctx_by_pos(data, i + 1))
+				break;
+
+			nr = i + 1;
+			break;
+		}
+	}
+
+	subflow = bpf_mptcp_subflow_ctx_by_pos(data, nr);
+	if (!subflow)
+		return -1;
+	mptcp_subflow_set_scheduled(subflow, true);
+	ptr->last_snd = mptcp_subflow_tcp_sock(subflow);
+	return 0;
+}
+
+SEC(".struct_ops")
+struct mptcp_sched_ops rr = {
+	.init		= (void *)mptcp_sched_rr_init,
+	.release	= (void *)mptcp_sched_rr_release,
+	.get_send	= (void *)bpf_rr_get_send,
+	.name		= "bpf_rr",
+};
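Because the struct_ops above sets .name = "bpf_rr", the scheduler becomes selectable by that name once it is registered. As a usage sketch that is not part of this commit, and assuming a kernel that exposes the per-netns net.mptcp.scheduler sysctl, selecting it from C could look like this:

#include <stdio.h>

/* Sketch: select the registered "bpf_rr" scheduler for the current netns
 * by writing its name to the (assumed) net.mptcp.scheduler sysctl.
 */
static int select_bpf_rr(void)
{
	FILE *f = fopen("/proc/sys/net/mptcp/scheduler", "w");

	if (!f)
		return -1;
	if (fputs("bpf_rr", f) < 0) {
		fclose(f);
		return -1;
	}
	return fclose(f);
}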
