Skip to content

Commit e45e5e8

Browse files
Geliang Tang authored and matttbe committed
bpf: Add mptcp packet scheduler struct_ops
This patch implements a new struct bpf_struct_ops: bpf_mptcp_sched_ops. Register and unregister the bpf scheduler in .reg and .unreg. Add write access for the scheduled flag of struct mptcp_subflow_context in .btf_struct_access. This MPTCP BPF scheduler implementation is similar to BPF TCP CC. And net/ipv4/bpf_tcp_ca.c is a frame of reference for this patch. Acked-by: Paolo Abeni <[email protected]> Reviewed-by: Mat Martineau <[email protected]> Co-developed-by: Matthieu Baerts <[email protected]> Signed-off-by: Matthieu Baerts <[email protected]> Co-developed-by: Gregory Detal <[email protected]> Signed-off-by: Gregory Detal <[email protected]> Signed-off-by: Geliang Tang <[email protected]>
1 parent 4fa28fe commit e45e5e8

File tree

1 file changed

+182
-0
lines changed

1 file changed

+182
-0
lines changed

net/mptcp/bpf.c

Lines changed: 182 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -10,8 +10,187 @@
1010
#define pr_fmt(fmt) "MPTCP: " fmt
1111

1212
#include <linux/bpf.h>
13+
#include <linux/bpf_verifier.h>
14+
#include <linux/btf.h>
15+
#include <linux/btf_ids.h>
16+
#include <net/bpf_sk_storage.h>
1317
#include "protocol.h"
1418

19+
#ifdef CONFIG_BPF_JIT
/* Forward declaration: the struct_ops table is defined at the bottom of
 * this section and referenced by the registration path below.
 */
static struct bpf_struct_ops bpf_mptcp_sched_ops;
/* BTF type ids of mptcp_sock / mptcp_subflow_context, resolved once in
 * bpf_mptcp_sched_init() and used to gate BTF write access.
 */
static u32 mptcp_sock_id,
	   mptcp_subflow_id;

/* MPTCP BPF packet scheduler */
26+
static const struct bpf_func_proto *
27+
bpf_mptcp_sched_get_func_proto(enum bpf_func_id func_id,
28+
const struct bpf_prog *prog)
29+
{
30+
switch (func_id) {
31+
case BPF_FUNC_sk_storage_get:
32+
return &bpf_sk_storage_get_proto;
33+
case BPF_FUNC_sk_storage_delete:
34+
return &bpf_sk_storage_delete_proto;
35+
case BPF_FUNC_skc_to_tcp6_sock:
36+
return &bpf_skc_to_tcp6_sock_proto;
37+
case BPF_FUNC_skc_to_tcp_sock:
38+
return &bpf_skc_to_tcp_sock_proto;
39+
default:
40+
return bpf_base_func_proto(func_id, prog);
41+
}
42+
}
43+
44+
static int bpf_mptcp_sched_btf_struct_access(struct bpf_verifier_log *log,
45+
const struct bpf_reg_state *reg,
46+
int off, int size)
47+
{
48+
u32 id = reg->btf_id;
49+
size_t end;
50+
51+
if (id == mptcp_sock_id) {
52+
switch (off) {
53+
case offsetof(struct mptcp_sock, snd_burst):
54+
end = offsetofend(struct mptcp_sock, snd_burst);
55+
break;
56+
default:
57+
bpf_log(log, "no write support to mptcp_sock at off %d\n",
58+
off);
59+
return -EACCES;
60+
}
61+
} else if (id == mptcp_subflow_id) {
62+
switch (off) {
63+
case offsetof(struct mptcp_subflow_context, avg_pacing_rate):
64+
end = offsetofend(struct mptcp_subflow_context, avg_pacing_rate);
65+
break;
66+
default:
67+
bpf_log(log, "no write support to mptcp_subflow_context at off %d\n",
68+
off);
69+
return -EACCES;
70+
}
71+
} else {
72+
bpf_log(log, "only access to mptcp sock or subflow is supported\n");
73+
return -EACCES;
74+
}
75+
76+
if (off + size > end) {
77+
bpf_log(log, "access beyond %s at off %u size %u ended at %zu",
78+
id == mptcp_sock_id ? "mptcp_sock" : "mptcp_subflow_context",
79+
off, size, end);
80+
return -EACCES;
81+
}
82+
83+
return NOT_INIT;
84+
}
85+
86+
static const struct bpf_verifier_ops bpf_mptcp_sched_verifier_ops = {
87+
.get_func_proto = bpf_mptcp_sched_get_func_proto,
88+
.is_valid_access = bpf_tracing_btf_ctx_access,
89+
.btf_struct_access = bpf_mptcp_sched_btf_struct_access,
90+
};
91+
92+
/* struct_ops .reg: make the BPF scheduler available to MPTCP.
 * Returns 0 on success or a negative errno from the MPTCP core.
 */
static int bpf_mptcp_sched_reg(void *kdata, struct bpf_link *link)
{
	return mptcp_register_scheduler(kdata);
}
96+
97+
/* struct_ops .unreg: remove the BPF scheduler from the MPTCP core. */
static void bpf_mptcp_sched_unreg(void *kdata, struct bpf_link *link)
{
	mptcp_unregister_scheduler(kdata);
}
101+
102+
/* struct_ops .check_member: no per-member restrictions — every
 * mptcp_sched_ops member may be implemented by a BPF program.
 */
static int bpf_mptcp_sched_check_member(const struct btf_type *t,
					const struct btf_member *member,
					const struct bpf_prog *prog)
{
	return 0;
}
108+
109+
static int bpf_mptcp_sched_init_member(const struct btf_type *t,
110+
const struct btf_member *member,
111+
void *kdata, const void *udata)
112+
{
113+
const struct mptcp_sched_ops *usched;
114+
struct mptcp_sched_ops *sched;
115+
u32 moff;
116+
117+
usched = (const struct mptcp_sched_ops *)udata;
118+
sched = (struct mptcp_sched_ops *)kdata;
119+
120+
moff = __btf_member_bit_offset(t, member) / 8;
121+
switch (moff) {
122+
case offsetof(struct mptcp_sched_ops, name):
123+
if (bpf_obj_name_cpy(sched->name, usched->name,
124+
sizeof(sched->name)) <= 0)
125+
return -EINVAL;
126+
return 1;
127+
}
128+
129+
return 0;
130+
}
131+
132+
static int bpf_mptcp_sched_init(struct btf *btf)
133+
{
134+
s32 type_id;
135+
136+
type_id = btf_find_by_name_kind(btf, "mptcp_sock",
137+
BTF_KIND_STRUCT);
138+
if (type_id < 0)
139+
return -EINVAL;
140+
mptcp_sock_id = type_id;
141+
142+
type_id = btf_find_by_name_kind(btf, "mptcp_subflow_context",
143+
BTF_KIND_STRUCT);
144+
if (type_id < 0)
145+
return -EINVAL;
146+
mptcp_subflow_id = type_id;
147+
148+
return 0;
149+
}
150+
151+
/* struct_ops .validate: defer sanity checks of the filled-in ops table
 * to the MPTCP core before registration is allowed.
 */
static int bpf_mptcp_sched_validate(void *kdata)
{
	return mptcp_validate_scheduler(kdata);
}
155+
156+
/* CFI stub for mptcp_sched_ops.get_send — never called, exists only so
 * the struct_ops CFI machinery has a typed landing pad.
 */
static int __bpf_mptcp_sched_get_send(struct mptcp_sock *msk)
{
	return 0;
}
160+
161+
/* CFI stub for mptcp_sched_ops.get_retrans — never called directly. */
static int __bpf_mptcp_sched_get_retrans(struct mptcp_sock *msk)
{
	return 0;
}
165+
166+
/* CFI stub for mptcp_sched_ops.init — never called directly. */
static void __bpf_mptcp_sched_init(struct mptcp_sock *msk)
{
}
169+
170+
/* CFI stub for mptcp_sched_ops.release — never called directly. */
static void __bpf_mptcp_sched_release(struct mptcp_sock *msk)
{
}
173+
174+
static struct mptcp_sched_ops __bpf_mptcp_sched_ops = {
175+
.get_send = __bpf_mptcp_sched_get_send,
176+
.get_retrans = __bpf_mptcp_sched_get_retrans,
177+
.init = __bpf_mptcp_sched_init,
178+
.release = __bpf_mptcp_sched_release,
179+
};
180+
181+
static struct bpf_struct_ops bpf_mptcp_sched_ops = {
182+
.verifier_ops = &bpf_mptcp_sched_verifier_ops,
183+
.reg = bpf_mptcp_sched_reg,
184+
.unreg = bpf_mptcp_sched_unreg,
185+
.check_member = bpf_mptcp_sched_check_member,
186+
.init_member = bpf_mptcp_sched_init_member,
187+
.init = bpf_mptcp_sched_init,
188+
.validate = bpf_mptcp_sched_validate,
189+
.name = "mptcp_sched_ops",
190+
.cfi_stubs = &__bpf_mptcp_sched_ops,
191+
};
192+
#endif /* CONFIG_BPF_JIT */
193+
15194
struct mptcp_sock *bpf_mptcp_sock_from_subflow(struct sock *sk)
16195
{
17196
if (sk && sk_fullsock(sk) && sk->sk_protocol == IPPROTO_TCP && sk_is_mptcp(sk))
@@ -104,6 +283,9 @@ static int __init bpf_mptcp_kfunc_init(void)
104283
ret = register_btf_fmodret_id_set(&bpf_mptcp_fmodret_set);
105284
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
106285
&bpf_mptcp_iter_kfunc_set);
286+
#ifdef CONFIG_BPF_JIT
287+
ret = ret ?: register_bpf_struct_ops(&bpf_mptcp_sched_ops, mptcp_sched_ops);
288+
#endif
107289

108290
return ret;
109291
}

0 commit comments

Comments
 (0)