
Commit 4319122

Geliang Tang authored and matttbe committed
selftests/bpf: Add bpf_rr scheduler & test
This patch implements the round-robin BPF MPTCP scheduler, named bpf_rr, which always picks the next available subflow to send data. If no such subflow is available, it picks the first one.

Use the MPTCP_SCHED_TEST macro to add a new test for this bpf_rr scheduler; the arguments "1 1" mean data has been sent on both net devices. Run this test with the RUN_MPTCP_TEST macro.

Signed-off-by: Geliang Tang <[email protected]>
Reviewed-by: Mat Martineau <[email protected]>
Reviewed-by: Matthieu Baerts (NGI0) <[email protected]>
1 parent 946752b commit 4319122

File tree

2 files changed: +89 −0 lines changed


tools/testing/selftests/bpf/prog_tests/mptcp.c

Lines changed: 15 additions & 0 deletions
@@ -14,6 +14,7 @@
 #include "mptcp_bpf_iters.skel.h"
 #include "mptcp_bpf_first.skel.h"
 #include "mptcp_bpf_bkup.skel.h"
+#include "mptcp_bpf_rr.skel.h"
 
 #define NS_TEST "mptcp_ns"
 #define ADDR_1 "10.0.1.1"
@@ -697,6 +698,18 @@ static void test_bkup(void)
         mptcp_bpf_bkup__destroy(skel);
 }
 
+static void test_rr(void)
+{
+        struct mptcp_bpf_rr *skel;
+
+        skel = mptcp_bpf_rr__open_and_load();
+        if (!ASSERT_OK_PTR(skel, "open_and_load: rr"))
+                return;
+
+        test_bpf_sched(skel->maps.rr, "rr", WITH_DATA, WITH_DATA);
+        mptcp_bpf_rr__destroy(skel);
+}
+
 void test_mptcp(void)
 {
         if (test__start_subtest("base"))
@@ -713,4 +726,6 @@ void test_mptcp(void)
                 test_first();
         if (test__start_subtest("bkup"))
                 test_bkup();
+        if (test__start_subtest("rr"))
+                test_rr();
 }
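
test_bpf_sched() is defined elsewhere in mptcp.c and is not part of this diff. As a minimal sketch of what it presumably does with skel->maps.rr, assuming the usual libbpf struct_ops flow (the helper name attach_rr_scheduler() below is illustrative, not from the patch):

/* Illustrative only: a minimal libbpf attach path for the "rr" struct_ops
 * map, assuming test_bpf_sched() works along these lines.
 */
#include <bpf/libbpf.h>
#include "mptcp_bpf_rr.skel.h"

static int attach_rr_scheduler(void)
{
        struct mptcp_bpf_rr *skel;
        struct bpf_link *link;

        skel = mptcp_bpf_rr__open_and_load();
        if (!skel)
                return -1;

        /* Registers the scheduler with the kernel under its .name, "bpf_rr" */
        link = bpf_map__attach_struct_ops(skel->maps.rr);
        if (!link) {
                mptcp_bpf_rr__destroy(skel);
                return -1;
        }

        /* ... transfer data over the MPTCP connection here ... */

        bpf_link__destroy(link);
        mptcp_bpf_rr__destroy(skel);
        return 0;
}

Attaching the map makes the scheduler available under the .name defined in the struct_ops below, so the selftest can check that data was sent on both subflows (WITH_DATA, WITH_DATA).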
tools/testing/selftests/bpf/progs/mptcp_bpf_rr.c

Lines changed: 74 additions & 0 deletions
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022, SUSE. */
+
+#include "mptcp_bpf.h"
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+struct mptcp_rr_storage {
+        struct sock *last_snd;
+};
+
+struct {
+        __uint(type, BPF_MAP_TYPE_SK_STORAGE);
+        __uint(map_flags, BPF_F_NO_PREALLOC);
+        __type(key, int);
+        __type(value, struct mptcp_rr_storage);
+} mptcp_rr_map SEC(".maps");
+
+SEC("struct_ops")
+void BPF_PROG(mptcp_sched_rr_init, struct mptcp_sock *msk)
+{
+        bpf_sk_storage_get(&mptcp_rr_map, msk, 0,
+                           BPF_LOCAL_STORAGE_GET_F_CREATE);
+}
+
+SEC("struct_ops")
+void BPF_PROG(mptcp_sched_rr_release, struct mptcp_sock *msk)
+{
+        bpf_sk_storage_delete(&mptcp_rr_map, msk);
+}
+
+SEC("struct_ops")
+int BPF_PROG(bpf_rr_get_send, struct mptcp_sock *msk)
+{
+        struct mptcp_subflow_context *subflow, *next;
+        struct mptcp_rr_storage *ptr;
+
+        ptr = bpf_sk_storage_get(&mptcp_rr_map, msk, 0,
+                                 BPF_LOCAL_STORAGE_GET_F_CREATE);
+        if (!ptr)
+                return -1;
+
+        next = bpf_mptcp_subflow_ctx(msk->first);
+        if (!next)
+                return -1;
+
+        if (!ptr->last_snd)
+                goto out;
+
+        bpf_for_each(mptcp_subflow, subflow, (struct sock *)msk) {
+                if (mptcp_subflow_tcp_sock(subflow) == ptr->last_snd) {
+                        subflow = bpf_iter_mptcp_subflow_next(&___it);
+                        if (!subflow)
+                                break;
+
+                        next = subflow;
+                        break;
+                }
+        }
+
+out:
+        mptcp_subflow_set_scheduled(next, true);
+        ptr->last_snd = mptcp_subflow_tcp_sock(next);
+        return 0;
+}
+
+SEC(".struct_ops.link")
+struct mptcp_sched_ops rr = {
+        .init = (void *)mptcp_sched_rr_init,
+        .release = (void *)mptcp_sched_rr_release,
+        .get_send = (void *)bpf_rr_get_send,
+        .name = "bpf_rr",
+};
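
With the struct_ops link attached, the kernel knows this scheduler by its .name, "bpf_rr". Outside the selftest, MPTCP development trees typically select a packet scheduler through the net.mptcp.scheduler sysctl; a minimal sketch under that assumption (the sysctl is not part of this commit, and the helper name is illustrative):

/* Hedged sketch: assumes the net.mptcp.scheduler sysctl from the MPTCP
 * development tree; not shown anywhere in this commit.
 */
#include <stdio.h>

static int select_bpf_rr_scheduler(void)
{
        FILE *f = fopen("/proc/sys/net/mptcp/scheduler", "w");

        if (!f)
                return -1;
        /* The name must match .name = "bpf_rr" in the struct_ops above */
        fprintf(f, "bpf_rr\n");
        fclose(f);
        return 0;
}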
