Skip to content

Commit 8024409

Browse files
Geliang Tang authored and matttbe committed
bpf: Add bpf_mptcp_sched_ops
This patch implements a new struct bpf_struct_ops: bpf_mptcp_sched_ops. Register and unregister the bpf scheduler in .reg and .unreg. Add write access for the scheduled flag of struct mptcp_subflow_context in .btf_struct_access. This MPTCP BPF scheduler implementation is similar to BPF TCP CC. And net/ipv4/bpf_tcp_ca.c is a frame of reference for this patch. Acked-by: Paolo Abeni <[email protected]> Reviewed-by: Mat Martineau <[email protected]> Co-developed-by: Matthieu Baerts <[email protected]> Signed-off-by: Matthieu Baerts <[email protected]> Co-developed-by: Gregory Detal <[email protected]> Signed-off-by: Gregory Detal <[email protected]> Signed-off-by: Geliang Tang <[email protected]>
1 parent e82016b commit 8024409

File tree

1 file changed

+186
-0
lines changed

1 file changed

+186
-0
lines changed

net/mptcp/bpf.c

Lines changed: 186 additions & 0 deletions
Original file line number · Diff line number · Diff line change
@@ -10,8 +10,191 @@
1010
#define pr_fmt(fmt) "MPTCP: " fmt
1111

1212
#include <linux/bpf.h>
13+
#include <linux/bpf_verifier.h>
14+
#include <linux/btf.h>
15+
#include <linux/btf_ids.h>
16+
#include <net/bpf_sk_storage.h>
1317
#include "protocol.h"
1418

19+
#ifdef CONFIG_BPF_JIT
20+
/* Forward declaration; the struct_ops descriptor itself is defined at
 * the bottom of this file.
 */
static struct bpf_struct_ops bpf_mptcp_sched_ops;
/* Cached BTF types and ids for mptcp_sock and mptcp_subflow_context,
 * resolved once in bpf_mptcp_sched_init() and consulted on every
 * btf_struct_access() check.
 * NOTE(review): __read_mostly binds only to mptcp_subflow_type in this
 * declaration, not to mptcp_sock_type — confirm this is intentional.
 */
static const struct btf_type *mptcp_sock_type, *mptcp_subflow_type __read_mostly;
static u32 mptcp_sock_id, mptcp_subflow_id;
23+
24+
static const struct bpf_func_proto *
25+
bpf_mptcp_sched_get_func_proto(enum bpf_func_id func_id,
26+
const struct bpf_prog *prog)
27+
{
28+
switch (func_id) {
29+
case BPF_FUNC_sk_storage_get:
30+
return &bpf_sk_storage_get_proto;
31+
case BPF_FUNC_sk_storage_delete:
32+
return &bpf_sk_storage_delete_proto;
33+
case BPF_FUNC_skc_to_tcp6_sock:
34+
return &bpf_skc_to_tcp6_sock_proto;
35+
case BPF_FUNC_skc_to_tcp_sock:
36+
return &bpf_skc_to_tcp_sock_proto;
37+
default:
38+
return bpf_base_func_proto(func_id, prog);
39+
}
40+
}
41+
42+
/* Verifier callback validating a BPF-program write into a BTF-typed
 * pointer.  Only two targets are writable: mptcp_sock.snd_burst and
 * mptcp_subflow_context.avg_pacing_rate.  Any other struct, any other
 * offset, or an access extending past the end of the allowed field is
 * rejected with -EACCES (with an explanatory verifier log message).
 */
static int bpf_mptcp_sched_btf_struct_access(struct bpf_verifier_log *log,
					     const struct bpf_reg_state *reg,
					     int off, int size)
{
	const struct btf_type *t;
	size_t end;

	t = btf_type_by_id(reg->btf, reg->btf_id);

	if (t == mptcp_sock_type) {
		switch (off) {
		case offsetof(struct mptcp_sock, snd_burst):
			/* 'end' marks one past the writable field */
			end = offsetofend(struct mptcp_sock, snd_burst);
			break;
		default:
			bpf_log(log, "no write support to mptcp_sock at off %d\n",
				off);
			return -EACCES;
		}
	} else if (t == mptcp_subflow_type) {
		switch (off) {
		case offsetof(struct mptcp_subflow_context, avg_pacing_rate):
			end = offsetofend(struct mptcp_subflow_context, avg_pacing_rate);
			break;
		default:
			bpf_log(log, "no write support to mptcp_subflow_context at off %d\n",
				off);
			return -EACCES;
		}
	} else {
		bpf_log(log, "only access to mptcp sock or subflow is supported\n");
		return -EACCES;
	}

	/* the access must stay entirely within the permitted field */
	if (off + size > end) {
		bpf_log(log, "access beyond %s at off %u size %u ended at %zu",
			t == mptcp_sock_type ? "mptcp_sock" : "mptcp_subflow_context",
			off, size, end);
		return -EACCES;
	}

	/* NOT_INIT signals the access is permitted — same convention as
	 * the bpf_tcp_ca btf_struct_access hook this code mirrors.
	 */
	return NOT_INIT;
}
85+
86+
/* Verifier hooks applied to BPF programs attached to bpf_mptcp_sched_ops. */
static const struct bpf_verifier_ops bpf_mptcp_sched_verifier_ops = {
	.get_func_proto = bpf_mptcp_sched_get_func_proto,
	/* generic BTF-based context access check used by tracing progs */
	.is_valid_access = bpf_tracing_btf_ctx_access,
	.btf_struct_access = bpf_mptcp_sched_btf_struct_access,
};
91+
92+
/* struct_ops .reg hook: register the BPF scheduler (kdata is the
 * kernel copy of the user-supplied mptcp_sched_ops) with MPTCP core.
 */
static int bpf_mptcp_sched_reg(void *kdata, struct bpf_link *link)
{
	return mptcp_register_scheduler(kdata);
}
96+
97+
/* struct_ops .unreg hook: remove a previously registered BPF scheduler. */
static void bpf_mptcp_sched_unreg(void *kdata, struct bpf_link *link)
{
	mptcp_unregister_scheduler(kdata);
}
101+
102+
/* struct_ops .check_member hook: no per-member restrictions — every
 * member of mptcp_sched_ops may be implemented by a BPF program.
 */
static int bpf_mptcp_sched_check_member(const struct btf_type *t,
					const struct btf_member *member,
					const struct bpf_prog *prog)
{
	return 0;
}
108+
109+
static int bpf_mptcp_sched_init_member(const struct btf_type *t,
110+
const struct btf_member *member,
111+
void *kdata, const void *udata)
112+
{
113+
const struct mptcp_sched_ops *usched;
114+
struct mptcp_sched_ops *sched;
115+
u32 moff;
116+
int ret;
117+
118+
usched = (const struct mptcp_sched_ops *)udata;
119+
sched = (struct mptcp_sched_ops *)kdata;
120+
121+
moff = __btf_member_bit_offset(t, member) / 8;
122+
switch (moff) {
123+
case offsetof(struct mptcp_sched_ops, name):
124+
if (bpf_obj_name_cpy(sched->name, usched->name,
125+
sizeof(sched->name)) <= 0)
126+
return -EINVAL;
127+
128+
rcu_read_lock();
129+
ret = mptcp_sched_find(usched->name) ? -EEXIST : 1;
130+
rcu_read_unlock();
131+
132+
return ret;
133+
}
134+
135+
return 0;
136+
}
137+
138+
static int bpf_mptcp_sched_init(struct btf *btf)
139+
{
140+
s32 type_id;
141+
142+
type_id = btf_find_by_name_kind(btf, "mptcp_sock",
143+
BTF_KIND_STRUCT);
144+
if (type_id < 0)
145+
return -EINVAL;
146+
mptcp_sock_id = type_id;
147+
mptcp_sock_type = btf_type_by_id(btf, mptcp_sock_id);
148+
149+
type_id = btf_find_by_name_kind(btf, "mptcp_subflow_context",
150+
BTF_KIND_STRUCT);
151+
if (type_id < 0)
152+
return -EINVAL;
153+
mptcp_subflow_id = type_id;
154+
mptcp_subflow_type = btf_type_by_id(btf, mptcp_subflow_id);
155+
156+
return 0;
157+
}
158+
159+
/* Do-nothing placeholder for the get_send operation; installed via
 * .cfi_stubs so indirect calls have a valid typed CFI target.
 */
static int __bpf_mptcp_sched_get_send(struct mptcp_sock *msk,
				      struct mptcp_sched_data *data)
{
	return 0;
}
164+
165+
/* Do-nothing placeholder for the get_retrans operation (CFI stub). */
static int __bpf_mptcp_sched_get_retrans(struct mptcp_sock *msk,
					 struct mptcp_sched_data *data)
{
	return 0;
}
170+
171+
/* Do-nothing placeholder for the scheduler init operation (CFI stub). */
static void __bpf_mptcp_sched_init(struct mptcp_sock *msk)
{
}
174+
175+
/* Do-nothing placeholder for the scheduler release operation (CFI stub). */
static void __bpf_mptcp_sched_release(struct mptcp_sock *msk)
{
}
178+
179+
/* Stub instance wired into .cfi_stubs below: supplies typed
 * implementations for every operation so kCFI-checked indirect calls
 * through a struct_ops map have valid targets.
 */
static struct mptcp_sched_ops __bpf_mptcp_sched_ops = {
	.get_send = __bpf_mptcp_sched_get_send,
	.get_retrans = __bpf_mptcp_sched_get_retrans,
	.init = __bpf_mptcp_sched_init,
	.release = __bpf_mptcp_sched_release,
};
185+
186+
/* The bpf_struct_ops descriptor exposing mptcp_sched_ops to BPF:
 * wires together the verifier hooks, the register/unregister
 * callbacks, member validation/initialization, BTF type resolution
 * and the CFI stub implementations.
 */
static struct bpf_struct_ops bpf_mptcp_sched_ops = {
	.verifier_ops	= &bpf_mptcp_sched_verifier_ops,
	.reg		= bpf_mptcp_sched_reg,
	.unreg		= bpf_mptcp_sched_unreg,
	.check_member	= bpf_mptcp_sched_check_member,
	.init_member	= bpf_mptcp_sched_init_member,
	.init		= bpf_mptcp_sched_init,
	.name		= "mptcp_sched_ops",
	.cfi_stubs	= &__bpf_mptcp_sched_ops,
};
196+
#endif /* CONFIG_BPF_JIT */
197+
15198
struct mptcp_sock *bpf_mptcp_sock_from_sock(struct sock *sk)
16199
{
17200
if (unlikely(!sk || !sk_fullsock(sk)))
@@ -145,6 +328,9 @@ static int __init bpf_mptcp_kfunc_init(void)
145328
ret = register_btf_fmodret_id_set(&bpf_mptcp_fmodret_set);
146329
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_CGROUP_SOCKOPT,
147330
&bpf_mptcp_common_kfunc_set);
331+
#ifdef CONFIG_BPF_JIT
332+
ret = ret ?: register_bpf_struct_ops(&bpf_mptcp_sched_ops, mptcp_sched_ops);
333+
#endif
148334

149335
return ret;
150336
}

0 commit comments

Comments
 (0)