Skip to content

Commit 2bdac80

Browse files
committed
tgupdate: merge t/DO-NOT-MERGE-mptcp-enabled-by-default into t/upstream base
2 parents e9c7225 + 4267efe commit 2bdac80

File tree

9 files changed

+83
-130
lines changed

9 files changed

+83
-130
lines changed

net/mptcp/bpf.c

Lines changed: 28 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -224,15 +224,26 @@ struct bpf_iter_mptcp_subflow_kern {
224224
__bpf_kfunc_start_defs();
225225

226226
/* kfunc: return the MPTCP subflow context attached to @sk__ign, or NULL.
 *
 * The "__ign" argument suffix asks the BPF verifier to skip type checks
 * on this pointer, so it is untrusted here and must be re-validated in
 * full before use.
 */
__bpf_kfunc static struct mptcp_subflow_context *
bpf_mptcp_subflow_ctx(const struct sock *sk__ign)
{
	const struct sock *sk = sk__ign;

	/* Only a full (non-timewait/request) TCP socket that is part of
	 * an MPTCP connection carries a subflow context.
	 */
	if (sk && sk_fullsock(sk) &&
	    sk->sk_protocol == IPPROTO_TCP && sk_is_mptcp(sk))
		return mptcp_subflow_ctx(sk);

	return NULL;
}
235237

238+
__bpf_kfunc static struct sock *
239+
bpf_mptcp_subflow_tcp_sock(const struct mptcp_subflow_context *subflow)
240+
{
241+
if (!subflow)
242+
return NULL;
243+
244+
return mptcp_subflow_tcp_sock(subflow);
245+
}
246+
236247
__bpf_kfunc static int
237248
bpf_iter_mptcp_subflow_new(struct bpf_iter_mptcp_subflow *it,
238249
struct sock *sk)
@@ -277,47 +288,42 @@ bpf_iter_mptcp_subflow_destroy(struct bpf_iter_mptcp_subflow *it)
277288
{
278289
}
279290

280-
__bpf_kfunc struct mptcp_subflow_context *
281-
bpf_mptcp_subflow_ctx_by_pos(const struct mptcp_sched_data *data, unsigned int pos)
291+
/* kfunc: report whether @sk's TCP retransmit queue is empty.
 *
 * Thin wrapper exposing tcp_rtx_queue_empty() to BPF schedulers; the
 * write queue is checked separately by callers that need both.
 */
__bpf_kfunc static bool bpf_mptcp_subflow_queues_empty(struct sock *sk)
{
	return tcp_rtx_queue_empty(sk);
}
287295

288-
__bpf_kfunc static bool bpf_mptcp_subflow_queues_empty(struct sock *sk)
296+
__bpf_kfunc static bool bpf_sk_stream_memory_free(const struct sock *sk__ign)
289297
{
290-
return tcp_rtx_queue_empty(sk);
298+
const struct sock *sk = sk__ign;
299+
300+
if (sk && sk_fullsock(sk) &&
301+
sk->sk_protocol == IPPROTO_TCP && sk_is_mptcp(sk))
302+
return sk_stream_memory_free(sk);
303+
304+
return NULL;
291305
}
292306

293307
__bpf_kfunc_end_defs();
294308

295309
BTF_KFUNCS_START(bpf_mptcp_common_kfunc_ids)
296310
BTF_ID_FLAGS(func, bpf_mptcp_subflow_ctx, KF_RET_NULL)
311+
BTF_ID_FLAGS(func, bpf_mptcp_subflow_tcp_sock, KF_RET_NULL)
297312
BTF_ID_FLAGS(func, bpf_iter_mptcp_subflow_new, KF_ITER_NEW | KF_TRUSTED_ARGS)
298313
BTF_ID_FLAGS(func, bpf_iter_mptcp_subflow_next, KF_ITER_NEXT | KF_RET_NULL)
299314
BTF_ID_FLAGS(func, bpf_iter_mptcp_subflow_destroy, KF_ITER_DESTROY)
300-
BTF_KFUNCS_END(bpf_mptcp_common_kfunc_ids)
301-
302-
static const struct btf_kfunc_id_set bpf_mptcp_common_kfunc_set = {
303-
.owner = THIS_MODULE,
304-
.set = &bpf_mptcp_common_kfunc_ids,
305-
};
306-
307-
BTF_KFUNCS_START(bpf_mptcp_sched_kfunc_ids)
308315
BTF_ID_FLAGS(func, mptcp_subflow_set_scheduled)
309-
BTF_ID_FLAGS(func, bpf_mptcp_subflow_ctx_by_pos)
310316
BTF_ID_FLAGS(func, mptcp_subflow_active)
311317
BTF_ID_FLAGS(func, mptcp_set_timeout)
312318
BTF_ID_FLAGS(func, mptcp_wnd_end)
313-
BTF_ID_FLAGS(func, tcp_stream_memory_free)
319+
BTF_ID_FLAGS(func, bpf_sk_stream_memory_free, KF_RET_NULL)
314320
BTF_ID_FLAGS(func, bpf_mptcp_subflow_queues_empty)
315321
BTF_ID_FLAGS(func, mptcp_pm_subflow_chk_stale, KF_SLEEPABLE)
316-
BTF_KFUNCS_END(bpf_mptcp_sched_kfunc_ids)
322+
BTF_KFUNCS_END(bpf_mptcp_common_kfunc_ids)
317323

318-
static const struct btf_kfunc_id_set bpf_mptcp_sched_kfunc_set = {
324+
static const struct btf_kfunc_id_set bpf_mptcp_common_kfunc_set = {
319325
.owner = THIS_MODULE,
320-
.set = &bpf_mptcp_sched_kfunc_ids,
326+
.set = &bpf_mptcp_common_kfunc_ids,
321327
};
322328

323329
static int __init bpf_mptcp_kfunc_init(void)
@@ -328,7 +334,7 @@ static int __init bpf_mptcp_kfunc_init(void)
328334
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_CGROUP_SOCKOPT,
329335
&bpf_mptcp_common_kfunc_set);
330336
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
331-
&bpf_mptcp_sched_kfunc_set);
337+
&bpf_mptcp_common_kfunc_set);
332338
#ifdef CONFIG_BPF_JIT
333339
ret = ret ?: register_bpf_struct_ops(&bpf_mptcp_sched_ops, mptcp_sched_ops);
334340
#endif

net/mptcp/protocol.h

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -718,8 +718,6 @@ void mptcp_subflow_queue_clean(struct sock *sk, struct sock *ssk);
718718
void mptcp_sock_graft(struct sock *sk, struct socket *parent);
719719
u64 mptcp_wnd_end(const struct mptcp_sock *msk);
720720
void mptcp_set_timeout(struct sock *sk);
721-
struct mptcp_subflow_context *
722-
bpf_mptcp_subflow_ctx_by_pos(const struct mptcp_sched_data *data, unsigned int pos);
723721
struct sock *__mptcp_nmpc_sk(struct mptcp_sock *msk);
724722
bool __mptcp_close(struct sock *sk, long timeout);
725723
void mptcp_cancel_work(struct sock *sk);

net/mptcp/sched.c

Lines changed: 0 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -157,21 +157,6 @@ void mptcp_subflow_set_scheduled(struct mptcp_subflow_context *subflow,
157157
static void mptcp_sched_data_set_contexts(const struct mptcp_sock *msk,
158158
struct mptcp_sched_data *data)
159159
{
160-
struct mptcp_subflow_context *subflow;
161-
int i = 0;
162-
163-
mptcp_for_each_subflow(msk, subflow) {
164-
if (i == MPTCP_SUBFLOWS_MAX) {
165-
pr_warn_once("too many subflows");
166-
break;
167-
}
168-
mptcp_subflow_set_scheduled(subflow, false);
169-
data->contexts[i++] = subflow;
170-
}
171-
data->subflows = i;
172-
173-
for (; i < MPTCP_SUBFLOWS_MAX; i++)
174-
data->contexts[i] = NULL;
175160
}
176161

177162
int mptcp_sched_get_send(struct mptcp_sock *msk)

tools/testing/selftests/bpf/progs/mptcp_bpf.h

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -45,11 +45,10 @@ mptcp_subflow_tcp_sock(const struct mptcp_subflow_context *subflow)
4545
/* ksym */
4646
extern struct mptcp_subflow_context *
4747
bpf_mptcp_subflow_ctx(const struct sock *sk) __ksym;
48+
extern struct sock *
49+
bpf_mptcp_subflow_tcp_sock(const struct mptcp_subflow_context *subflow) __ksym;
4850

4951
extern void mptcp_subflow_set_scheduled(struct mptcp_subflow_context *subflow,
5052
bool scheduled) __ksym;
5153

52-
extern struct mptcp_subflow_context *
53-
bpf_mptcp_subflow_ctx_by_pos(const struct mptcp_sched_data *data, unsigned int pos) __ksym;
54-
5554
#endif

tools/testing/selftests/bpf/progs/mptcp_bpf_bkup.c

Lines changed: 3 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -20,26 +20,16 @@ SEC("struct_ops")
2020
/* MPTCP scheduler hook: prefer a non-backup subflow for sending.
 *
 * Iterates @msk's subflows and schedules the first one that is not
 * fully backup-flagged, i.e. any subflow where either the peer-set
 * 'backup' bit or the locally requested 'request_bkup' bit is clear.
 * Bitfields are read with BPF_CORE_READ_BITFIELD_PROBED for CO-RE
 * relocation safety.  Always returns 0, even when nothing matched.
 */
int BPF_PROG(bpf_bkup_get_send, struct mptcp_sock *msk,
	     struct mptcp_sched_data *data)
{
	struct mptcp_subflow_context *subflow;

	bpf_for_each(mptcp_subflow, subflow, (struct sock *)msk) {
		/* Skip only subflows with both backup bits set. */
		if (!BPF_CORE_READ_BITFIELD_PROBED(subflow, backup) ||
		    !BPF_CORE_READ_BITFIELD_PROBED(subflow, request_bkup)) {
			mptcp_subflow_set_scheduled(subflow, true);
			break;
		}
	}

	return 0;
}
4535

tools/testing/selftests/bpf/progs/mptcp_bpf_burst.c

Lines changed: 26 additions & 52 deletions
Original file line numberDiff line numberDiff line change
@@ -11,15 +11,10 @@ char _license[] SEC("license") = "GPL";
1111

1212
#define min(a, b) ((a) < (b) ? (a) : (b))
1313

14-
struct bpf_subflow_send_info {
15-
__u8 subflow_id;
16-
__u64 linger_time;
17-
};
18-
1914
extern bool mptcp_subflow_active(struct mptcp_subflow_context *subflow) __ksym;
2015
extern void mptcp_set_timeout(struct sock *sk) __ksym;
2116
extern __u64 mptcp_wnd_end(const struct mptcp_sock *msk) __ksym;
22-
extern bool tcp_stream_memory_free(const struct sock *sk, int wake) __ksym;
17+
extern bool bpf_sk_stream_memory_free(const struct sock *sk) __ksym;
2318
extern bool bpf_mptcp_subflow_queues_empty(struct sock *sk) __ksym;
2419
extern void mptcp_pm_subflow_chk_stale(const struct mptcp_sock *msk, struct sock *ssk) __ksym;
2520

@@ -44,19 +39,6 @@ static __always_inline bool tcp_rtx_and_write_queues_empty(struct sock *sk)
4439
return bpf_mptcp_subflow_queues_empty(sk) && tcp_write_queue_empty(sk);
4540
}
4641

47-
static __always_inline bool __sk_stream_memory_free(const struct sock *sk, int wake)
48-
{
49-
if (sk->sk_wmem_queued >= sk->sk_sndbuf)
50-
return false;
51-
52-
return tcp_stream_memory_free(sk, wake);
53-
}
54-
55-
static __always_inline bool sk_stream_memory_free(const struct sock *sk)
56-
{
57-
return __sk_stream_memory_free(sk, 0);
58-
}
59-
6042
SEC("struct_ops")
6143
void BPF_PROG(mptcp_sched_burst_init, struct mptcp_sock *msk)
6244
{
@@ -71,7 +53,7 @@ SEC("struct_ops")
7153
int BPF_PROG(bpf_burst_get_send, struct mptcp_sock *msk,
7254
struct mptcp_sched_data *data)
7355
{
74-
struct bpf_subflow_send_info send_info[SSK_MODE_MAX];
56+
struct subflow_send_info send_info[SSK_MODE_MAX];
7557
struct mptcp_subflow_context *subflow;
7658
struct sock *sk = (struct sock *)msk;
7759
__u32 pace, burst, wmem;
@@ -81,18 +63,12 @@ int BPF_PROG(bpf_burst_get_send, struct mptcp_sock *msk,
8163

8264
/* pick the subflow with the lower wmem/wspace ratio */
8365
for (i = 0; i < SSK_MODE_MAX; ++i) {
84-
send_info[i].subflow_id = MPTCP_SUBFLOWS_MAX;
66+
send_info[i].ssk = NULL;
8567
send_info[i].linger_time = -1;
8668
}
8769

88-
for (i = 0; i < data->subflows && i < MPTCP_SUBFLOWS_MAX; i++) {
89-
bool backup;
90-
91-
subflow = bpf_mptcp_subflow_ctx_by_pos(data, i);
92-
if (!subflow)
93-
break;
94-
95-
backup = subflow->backup || subflow->request_bkup;
70+
bpf_for_each(mptcp_subflow, subflow, sk) {
71+
bool backup = subflow->backup || subflow->request_bkup;
9672

9773
ssk = mptcp_subflow_tcp_sock(subflow);
9874
if (!mptcp_subflow_active(subflow))
@@ -110,24 +86,26 @@ int BPF_PROG(bpf_burst_get_send, struct mptcp_sock *msk,
11086

11187
linger_time = div_u64((__u64)ssk->sk_wmem_queued << 32, pace);
11288
if (linger_time < send_info[backup].linger_time) {
113-
send_info[backup].subflow_id = i;
89+
send_info[backup].ssk = ssk;
11490
send_info[backup].linger_time = linger_time;
11591
}
11692
}
11793
mptcp_set_timeout(sk);
11894

11995
/* pick the best backup if no other subflow is active */
12096
if (!nr_active)
121-
send_info[SSK_MODE_ACTIVE].subflow_id = send_info[SSK_MODE_BACKUP].subflow_id;
97+
send_info[SSK_MODE_ACTIVE].ssk = send_info[SSK_MODE_BACKUP].ssk;
12298

123-
subflow = bpf_mptcp_subflow_ctx_by_pos(data, send_info[SSK_MODE_ACTIVE].subflow_id);
124-
if (!subflow)
99+
ssk = send_info[SSK_MODE_ACTIVE].ssk;
100+
if (!ssk || !bpf_sk_stream_memory_free(ssk))
125101
return -1;
126-
ssk = mptcp_subflow_tcp_sock(subflow);
127-
if (!ssk || !sk_stream_memory_free(ssk))
102+
103+
subflow = bpf_mptcp_subflow_ctx(ssk);
104+
if (!subflow)
128105
return -1;
129106

130107
burst = min(MPTCP_SEND_BURST_SIZE, mptcp_wnd_end(msk) - msk->snd_nxt);
108+
ssk = bpf_core_cast(ssk, struct sock);
131109
wmem = ssk->sk_wmem_queued;
132110
if (!burst)
133111
goto out;
@@ -146,20 +124,16 @@ SEC("struct_ops")
146124
int BPF_PROG(bpf_burst_get_retrans, struct mptcp_sock *msk,
147125
struct mptcp_sched_data *data)
148126
{
149-
int backup = MPTCP_SUBFLOWS_MAX, pick = MPTCP_SUBFLOWS_MAX, subflow_id;
127+
struct sock *backup = NULL, *pick = NULL;
150128
struct mptcp_subflow_context *subflow;
151129
int min_stale_count = INT_MAX;
152-
struct sock *ssk;
153130

154-
for (int i = 0; i < data->subflows && i < MPTCP_SUBFLOWS_MAX; i++) {
155-
subflow = bpf_mptcp_subflow_ctx_by_pos(data, i);
156-
if (!subflow)
157-
break;
131+
bpf_for_each(mptcp_subflow, subflow, (struct sock *)msk) {
132+
struct sock *ssk = bpf_mptcp_subflow_tcp_sock(subflow);
158133

159-
if (!mptcp_subflow_active(subflow))
134+
if (!ssk || !mptcp_subflow_active(subflow))
160135
continue;
161136

162-
ssk = mptcp_subflow_tcp_sock(subflow);
163137
/* still data outstanding at TCP level? skip this */
164138
if (!tcp_rtx_and_write_queues_empty(ssk)) {
165139
mptcp_pm_subflow_chk_stale(msk, ssk);
@@ -168,23 +142,23 @@ int BPF_PROG(bpf_burst_get_retrans, struct mptcp_sock *msk,
168142
}
169143

170144
if (subflow->backup || subflow->request_bkup) {
171-
if (backup == MPTCP_SUBFLOWS_MAX)
172-
backup = i;
145+
if (!backup)
146+
backup = ssk;
173147
continue;
174148
}
175149

176-
if (pick == MPTCP_SUBFLOWS_MAX)
177-
pick = i;
150+
if (!pick)
151+
pick = ssk;
178152
}
179153

180-
if (pick < MPTCP_SUBFLOWS_MAX) {
181-
subflow_id = pick;
154+
if (pick)
182155
goto out;
183-
}
184-
subflow_id = min_stale_count > 1 ? backup : MPTCP_SUBFLOWS_MAX;
156+
pick = min_stale_count > 1 ? backup : NULL;
185157

186158
out:
187-
subflow = bpf_mptcp_subflow_ctx_by_pos(data, subflow_id);
159+
if (!pick)
160+
return -1;
161+
subflow = bpf_mptcp_subflow_ctx(pick);
188162
if (!subflow)
189163
return -1;
190164
mptcp_subflow_set_scheduled(subflow, true);

tools/testing/selftests/bpf/progs/mptcp_bpf_first.c

Lines changed: 7 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,13 @@ SEC("struct_ops")
2020
/* MPTCP "first" scheduler: always transmit on the first subflow.
 *
 * Resolves msk->first through the bpf_mptcp_subflow_ctx() kfunc, which
 * validates the socket and returns NULL when no subflow context is
 * available; in that case bail out with -1.  Otherwise schedule the
 * first subflow and return 0.
 */
int BPF_PROG(bpf_first_get_send, struct mptcp_sock *msk,
	     struct mptcp_sched_data *data)
{
	struct mptcp_subflow_context *subflow;

	subflow = bpf_mptcp_subflow_ctx(msk->first);
	if (!subflow)
		return -1;

	mptcp_subflow_set_scheduled(subflow, true);
	return 0;
}
2632

tools/testing/selftests/bpf/progs/mptcp_bpf_red.c

Lines changed: 3 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -20,12 +20,10 @@ SEC("struct_ops")
2020
/* MPTCP "redundant" scheduler: schedule every subflow so data is
 * duplicated across all of them.  Always returns 0.
 */
int BPF_PROG(bpf_red_get_send, struct mptcp_sock *msk,
	     struct mptcp_sched_data *data)
{
	struct mptcp_subflow_context *subflow;

	bpf_for_each(mptcp_subflow, subflow, (struct sock *)msk)
		mptcp_subflow_set_scheduled(subflow, true);

	return 0;
}

0 commit comments

Comments
 (0)